diff --git a/packages/autorest.typescript/src/utils/schemaHelpers.ts b/packages/autorest.typescript/src/utils/schemaHelpers.ts index 491fc60f5b..dac9866424 100644 --- a/packages/autorest.typescript/src/utils/schemaHelpers.ts +++ b/packages/autorest.typescript/src/utils/schemaHelpers.ts @@ -233,8 +233,10 @@ export function getSecurityInfoFromModel(security: Security) { const { addCredentials } = getAutorestOptions(); const credentialScopes: Set = new Set(); let credentialKeyHeaderName: string = ""; + let hasOAuth2Defined = false; for (const securitySchema of security.schemes) { if (securitySchema.type === "OAuth2") { + hasOAuth2Defined = true; (securitySchema as OAuth2SecurityScheme).scopes.forEach(scope => { const scopes = scope.split(","); for (const scope of scopes) { @@ -273,7 +275,8 @@ export function getSecurityInfoFromModel(security: Security) { } return { addCredentials: refinedAddCredentials, - credentialScopes: scopes, + credentialScopes: + !hasOAuth2Defined && scopes.length === 0 ? undefined : scopes, credentialKeyHeaderName: credentialKeyHeaderName }; } diff --git a/packages/autorest.typescript/test/rlcIntegration/generated/multipleUrlParameters/src/multipleUrlParameterRestClient.ts b/packages/autorest.typescript/test/rlcIntegration/generated/multipleUrlParameters/src/multipleUrlParameterRestClient.ts index 47531217b9..701ab5a43b 100644 --- a/packages/autorest.typescript/test/rlcIntegration/generated/multipleUrlParameters/src/multipleUrlParameterRestClient.ts +++ b/packages/autorest.typescript/test/rlcIntegration/generated/multipleUrlParameters/src/multipleUrlParameterRestClient.ts @@ -23,13 +23,6 @@ export default function createClient( options.baseUrl ?? `${endpoint}/catalog/api/atlas/${serviceVersion}/{accountName}`; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? ["user_impersonation"] - } - }; - const userAgentInfo = `azsdk-js-multiple-url-parameter-rest/1.0.0-preview1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -42,6 +35,9 @@ export default function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + scopes: options.credentials?.scopes ?? ["user_impersonation"] } }; diff --git a/packages/autorest.typescript/test/rlcIntegration/generated/securityAADRest/src/securityAADRestClient.ts b/packages/autorest.typescript/test/rlcIntegration/generated/securityAADRest/src/securityAADRestClient.ts index 355d3d8252..dc151963cc 100644 --- a/packages/autorest.typescript/test/rlcIntegration/generated/securityAADRest/src/securityAADRestClient.ts +++ b/packages/autorest.typescript/test/rlcIntegration/generated/securityAADRest/src/securityAADRestClient.ts @@ -16,15 +16,6 @@ export default function createClient( options: ClientOptions = {} ): SecurityAADRestClient { const baseUrl = options.baseUrl ?? `http://localhost:3000`; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://security.microsoft.com/.default" - ] - } - }; - const userAgentInfo = `azsdk-js-security-aad-rest/1.0.0-preview1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -37,6 +28,11 @@ export default function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + scopes: options.credentials?.scopes ?? 
[ + "https://security.microsoft.com/.default" + ] } }; diff --git a/packages/autorest.typescript/test/rlcIntegration/generated/securityKeyRest/src/securityKeyRestClient.ts b/packages/autorest.typescript/test/rlcIntegration/generated/securityKeyRest/src/securityKeyRestClient.ts index 3acc6b4b68..6a15c4f337 100644 --- a/packages/autorest.typescript/test/rlcIntegration/generated/securityKeyRest/src/securityKeyRestClient.ts +++ b/packages/autorest.typescript/test/rlcIntegration/generated/securityKeyRest/src/securityKeyRestClient.ts @@ -16,13 +16,6 @@ export default function createClient( options: ClientOptions = {} ): SecurityKeyRestClient { const baseUrl = options.baseUrl ?? `http://localhost:3000`; - options = { - ...options, - credentials: { - apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "security-key" - } - }; - const userAgentInfo = `azsdk-js-security-key-rest/1.0.0-preview1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -35,6 +28,9 @@ export default function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "security-key" } }; diff --git a/packages/autorest.typescript/test/smoke/generated/agrifood-data-plane/src/azureAgriFoodPlatformDataPlaneService.ts b/packages/autorest.typescript/test/smoke/generated/agrifood-data-plane/src/azureAgriFoodPlatformDataPlaneService.ts index e57550ade0..8b07bd4042 100644 --- a/packages/autorest.typescript/test/smoke/generated/agrifood-data-plane/src/azureAgriFoodPlatformDataPlaneService.ts +++ b/packages/autorest.typescript/test/smoke/generated/agrifood-data-plane/src/azureAgriFoodPlatformDataPlaneService.ts @@ -19,13 +19,6 @@ export default function createClient( ): AzureAgriFoodPlatformDataPlaneServiceClient { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? "2021-03-31-preview"; - options = { - ...options, - credentials: { - apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "Authorization" - } - }; - const userAgentInfo = `azsdk-js-agrifood-data-plane-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -38,6 +31,9 @@ export default function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "Authorization" } }; diff --git a/packages/autorest.typescript/test/smoke/generated/anomaly-detector-mv-rest/src/anomalyDetectorMV.ts b/packages/autorest.typescript/test/smoke/generated/anomaly-detector-mv-rest/src/anomalyDetectorMV.ts index 54693594b0..961919b082 100644 --- a/packages/autorest.typescript/test/smoke/generated/anomaly-detector-mv-rest/src/anomalyDetectorMV.ts +++ b/packages/autorest.typescript/test/smoke/generated/anomaly-detector-mv-rest/src/anomalyDetectorMV.ts @@ -25,14 +25,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/anomalydetector/${apiVersion}`; - options = { - ...options, - credentials: { - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key" - } - }; - const userAgentInfo = `azsdk-js-anomaly-detector-mv-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -45,6 +37,10 @@ export default function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? 
logger.info + }, + credentials: { + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key" } }; diff --git a/packages/autorest.typescript/test/smoke/generated/anomaly-detector-rest/src/anomalyDetectorRest.ts b/packages/autorest.typescript/test/smoke/generated/anomaly-detector-rest/src/anomalyDetectorRest.ts index 1a0f3d5234..7df8e81e3a 100644 --- a/packages/autorest.typescript/test/smoke/generated/anomaly-detector-rest/src/anomalyDetectorRest.ts +++ b/packages/autorest.typescript/test/smoke/generated/anomaly-detector-rest/src/anomalyDetectorRest.ts @@ -22,14 +22,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/anomalydetector/${apiVersion}`; - options = { - ...options, - credentials: { - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key" - } - }; - const userAgentInfo = `azsdk-js-anomaly-detector-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -42,6 +34,10 @@ export default function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key" } }; diff --git a/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/account/purviewAccount.ts b/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/account/purviewAccount.ts index ea4d8c040f..a8b1b50c92 100644 --- a/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/account/purviewAccount.ts +++ b/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/account/purviewAccount.ts @@ -19,15 +19,6 @@ export function createClient( ): PurviewAccountClient { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? "2019-11-01-preview"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://purview.azure.net/.default" - ] - } - }; - const userAgentInfo = `azsdk-js-purview-administration-rest/1.0.0-beta.2`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -40,6 +31,11 @@ export function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://purview.azure.net/.default" + ] } }; diff --git a/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/metadataPolicies/purviewMetadataPolicies.ts b/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/metadataPolicies/purviewMetadataPolicies.ts index cc97d2e98a..3e6da7bba7 100644 --- a/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/metadataPolicies/purviewMetadataPolicies.ts +++ b/packages/autorest.typescript/test/smoke/generated/purview-administration-rest/src/metadataPolicies/purviewMetadataPolicies.ts @@ -19,13 +19,6 @@ export function createClient( ): PurviewMetadataPoliciesClient { const baseUrl = options.baseUrl ?? `${endpoint}/policyStore`; options.apiVersion = options.apiVersion ?? "2021-07-01-preview"; - options = { - ...options, - credentials: { - apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? 
"CustomAuth" - } - }; - const userAgentInfo = `azsdk-js-purview-administration-rest/1.0.0-beta.2`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -38,6 +31,9 @@ export function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "CustomAuth" } }; diff --git a/packages/autorest.typescript/test/smoke/generated/synapse-artifacts-rest/src/synapseArtifacts.ts b/packages/autorest.typescript/test/smoke/generated/synapse-artifacts-rest/src/synapseArtifacts.ts index 7e13398657..1281360df5 100644 --- a/packages/autorest.typescript/test/smoke/generated/synapse-artifacts-rest/src/synapseArtifacts.ts +++ b/packages/autorest.typescript/test/smoke/generated/synapse-artifacts-rest/src/synapseArtifacts.ts @@ -19,15 +19,6 @@ export default function createClient( ): SynapseArtifactsClient { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? "2021-11-01-preview"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://dev.azuresynapse.net/.default" - ] - } - }; - const userAgentInfo = `azsdk-js-synapse-artifacts-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -40,6 +31,11 @@ export default function createClient( }, loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info + }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://dev.azuresynapse.net/.default" + ] } }; diff --git a/packages/rlc-common/src/buildClient.ts b/packages/rlc-common/src/buildClient.ts index 33b4a1f2a7..b77c06e3e7 100644 --- a/packages/rlc-common/src/buildClient.ts +++ b/packages/rlc-common/src/buildClient.ts @@ -86,8 +86,7 @@ export function buildClient(model: RLCModel): File | undefined { credentialKeyHeaderName, customHttpAuthHeaderName } = model.options; - const credentialTypes = - credentialScopes && credentialScopes.length > 0 ? ["TokenCredential"] : []; + const credentialTypes = credentialScopes ? ["TokenCredential"] : []; if (credentialKeyHeaderName || customHttpAuthHeaderName) { credentialTypes.push("KeyCredential"); @@ -211,9 +210,7 @@ function isSecurityInfoDefined( customHttpAuthHeaderName?: string ) { return ( - (credentialScopes && credentialScopes.length > 0) || - credentialKeyHeaderName || - customHttpAuthHeaderName + credentialScopes || credentialKeyHeaderName || customHttpAuthHeaderName ); } @@ -293,16 +290,6 @@ export function getClientFactoryBody( }` : ""; - const overrideOptionsStatement = `options = { - ...options, - userAgentOptions: { - userAgentPrefix - }, - loggingOptions: { - logger: options.loggingOptions?.logger ?? logger.info - }${customHeaderOptions} - }`; - const baseUrlStatement: VariableStatementStructure = { kind: StructureKind.VariableStatement, declarationKind: VariableDeclarationKind.Const, @@ -310,11 +297,10 @@ export function getClientFactoryBody( }; const { credentialScopes, credentialKeyHeaderName } = model.options; - - const scopesString = - credentialScopes && credentialScopes.length - ? credentialScopes.map((cs) => `"${cs}"`).join(", ") - : ""; + const scopesString = credentialScopes + ? credentialScopes.map((cs) => `"${cs}"`).join(", ") || + "`${baseUrl}/.default`" + : ""; const scopes = scopesString ? `scopes: options.credentials?.scopes ?? [${scopesString}],` : ""; @@ -323,19 +309,26 @@ export function getClientFactoryBody( ? 
`apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "${credentialKeyHeaderName}",` : ""; - const credentials = + const credentialsOptions = scopes || apiKeyHeaderName - ? `options = { - ...options, - credentials: { - ${scopes} - ${apiKeyHeaderName} - }, + ? `, + credentials: { + ${scopes} + ${apiKeyHeaderName} }` : ""; + const overrideOptionsStatement = `options = { + ...options, + userAgentOptions: { + userAgentPrefix + }, + loggingOptions: { + logger: options.loggingOptions?.logger ?? logger.info + }${customHeaderOptions}${credentialsOptions} + }`; const getClient = `const client = getClient( - baseUrl, ${credentials ? "credentials," : ""} options + baseUrl, ${credentialsOptions ? "credentials," : ""} options ) as ${clientTypeName}; `; const { customHttpAuthHeaderName, customHttpAuthSharedKeyPrefix } = @@ -372,7 +365,6 @@ export function getClientFactoryBody( ...optionalUrlParameters, baseUrlStatement, apiVersionStatement, - credentials, userAgentInfoStatement, userAgentStatement, overrideOptionsStatement, diff --git a/packages/rlc-common/src/interfaces.ts b/packages/rlc-common/src/interfaces.ts index c37633a740..2b15d1cbfa 100644 --- a/packages/rlc-common/src/interfaces.ts +++ b/packages/rlc-common/src/interfaces.ts @@ -150,6 +150,11 @@ export interface RLCOptions { batch?: any[]; packageDetails?: PackageDetails; addCredentials?: boolean; + /** Three possiblie values: + * - undefined, no credentialScopes and relevant settings would be generated + * - [], which means we would generate TokenCredential but no credentialScopes and relevant settings + * - ["..."], which means we would generate credentialScopes and relevant settings with the given values + */ credentialScopes?: string[]; credentialKeyHeaderName?: string; customHttpAuthHeaderName?: string; diff --git a/packages/rlc-common/src/transformSampleGroups.ts b/packages/rlc-common/src/transformSampleGroups.ts index cac59c9d72..2dd7685f21 100644 --- a/packages/rlc-common/src/transformSampleGroups.ts +++ b/packages/rlc-common/src/transformSampleGroups.ts @@ -213,9 +213,7 @@ function convertClientLevelParameters( model.options; const hasUrlParameter = !!urlParameters, hasCredentials = - addCredentials && - ((credentialScopes && credentialScopes.length > 0) || - credentialKeyHeaderName); + addCredentials && (credentialScopes || credentialKeyHeaderName); if (hasUrlParameter) { // convert the host parameters in url diff --git a/packages/typespec-test/test/anomalyDetector/generated/typespec-ts/src/anomalyDetectorClient.ts b/packages/typespec-test/test/anomalyDetector/generated/typespec-ts/src/anomalyDetectorClient.ts index 63cdc7dcf1..9f5712b2d3 100644 --- a/packages/typespec-test/test/anomalyDetector/generated/typespec-ts/src/anomalyDetectorClient.ts +++ b/packages/typespec-test/test/anomalyDetector/generated/typespec-ts/src/anomalyDetectorClient.ts @@ -26,14 +26,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/anomalydetector/${apiVersion}`; - options = { - ...options, - credentials: { - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", - }, - }; - const userAgentInfo = `azsdk-js-ai-anomaly-detector-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -47,6 +39,10 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? 
"Ocp-Apim-Subscription-Key", + }, }; const client = getClient( diff --git a/packages/typespec-test/test/authoring/generated/typespec-ts/src/authoringClient.ts b/packages/typespec-test/test/authoring/generated/typespec-ts/src/authoringClient.ts index 989918d72a..4537f0889b 100644 --- a/packages/typespec-test/test/authoring/generated/typespec-ts/src/authoringClient.ts +++ b/packages/typespec-test/test/authoring/generated/typespec-ts/src/authoringClient.ts @@ -19,14 +19,6 @@ export default function createClient( ): AuthoringClient { const baseUrl = options.baseUrl ?? `${endpoint}/language`; options.apiVersion = options.apiVersion ?? "202ß2-05-15-preview"; - options = { - ...options, - credentials: { - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", - }, - }; - const userAgentInfo = `azsdk-js-authoring-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -40,6 +32,10 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", + }, }; const client = getClient(baseUrl, credentials, options) as AuthoringClient; diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/review/batch.api.md b/packages/typespec-test/test/batch/generated/typespec-ts/review/batch.api.md deleted file mode 100644 index c455cee528..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/review/batch.api.md +++ /dev/null @@ -1,7556 +0,0 @@ -## API Report File for "@azure-rest/batch" - -> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). - -```ts - -import { Client } from '@azure-rest/core-client'; -import { ClientOptions } from '@azure-rest/core-client'; -import { ErrorResponse } from '@azure-rest/core-client'; -import { HttpResponse } from '@azure-rest/core-client'; -import { Paged } from '@azure/core-paging'; -import { PagedAsyncIterableIterator } from '@azure/core-paging'; -import { PathUncheckedResponse } from '@azure-rest/core-client'; -import { RawHttpHeaders } from '@azure/core-rest-pipeline'; -import { RawHttpHeadersInput } from '@azure/core-rest-pipeline'; -import { RequestParameters } from '@azure-rest/core-client'; -import { StreamableMethod } from '@azure-rest/core-client'; -import { TokenCredential } from '@azure/core-auth'; - -// @public (undocumented) -export interface AccountListPoolNodeCounts { - get(options?: AccountListPoolNodeCountsParameters): StreamableMethod; -} - -// @public (undocumented) -export interface AccountListPoolNodeCounts200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface AccountListPoolNodeCounts200Response extends HttpResponse { - // (undocumented) - body: PoolNodeCountsListResultOutput; - // (undocumented) - headers: RawHttpHeaders & AccountListPoolNodeCounts200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface AccountListPoolNodeCountsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface AccountListPoolNodeCountsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & AccountListPoolNodeCountsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface AccountListPoolNodeCountsHeaderParam { - 
// (undocumented) - headers?: RawHttpHeadersInput & AccountListPoolNodeCountsHeaders; -} - -// @public (undocumented) -export interface AccountListPoolNodeCountsHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type AccountListPoolNodeCountsParameters = AccountListPoolNodeCountsQueryParam & AccountListPoolNodeCountsHeaderParam & RequestParameters; - -// @public (undocumented) -export interface AccountListPoolNodeCountsQueryParam { - // (undocumented) - queryParameters?: AccountListPoolNodeCountsQueryParamProperties; -} - -// @public (undocumented) -export interface AccountListPoolNodeCountsQueryParamProperties { - $filter?: string; - maxresults?: number; - timeOut?: number; -} - -// @public (undocumented) -export interface AccountListSupportedImages { - get(options?: AccountListSupportedImagesParameters): StreamableMethod; -} - -// @public (undocumented) -export interface AccountListSupportedImages200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface AccountListSupportedImages200Response extends HttpResponse { - // (undocumented) - body: AccountListSupportedImagesResultOutput; - // (undocumented) - headers: RawHttpHeaders & AccountListSupportedImages200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface AccountListSupportedImagesDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface AccountListSupportedImagesDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & AccountListSupportedImagesDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface AccountListSupportedImagesHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & AccountListSupportedImagesHeaders; -} - -// @public (undocumented) -export interface AccountListSupportedImagesHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type AccountListSupportedImagesParameters = AccountListSupportedImagesQueryParam & AccountListSupportedImagesHeaderParam & RequestParameters; - -// @public (undocumented) -export interface AccountListSupportedImagesQueryParam { - // (undocumented) - queryParameters?: AccountListSupportedImagesQueryParamProperties; -} - -// @public (undocumented) -export interface AccountListSupportedImagesQueryParamProperties { - $filter?: string; - maxresults?: number; - timeOut?: number; -} - -// @public -export interface AccountListSupportedImagesResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface AffinityInformation { - affinityId: string; -} - -// @public -export interface AffinityInformationOutput { - affinityId: string; -} - -// @public -export interface ApplicationListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public (undocumented) -export interface ApplicationOperationsGet { - get(options?: ApplicationOperationsGetParameters): StreamableMethod; -} - -// @public -export interface ApplicationOperationsGet200Response extends HttpResponse { - // (undocumented) - body: ApplicationOutput; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ApplicationOperationsGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// 
@public (undocumented) -export interface ApplicationOperationsGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ApplicationOperationsGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export type ApplicationOperationsGetParameters = RequestParameters; - -// @public (undocumented) -export interface ApplicationOperationsList { - get(options?: ApplicationOperationsListParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ApplicationOperationsList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface ApplicationOperationsList200Response extends HttpResponse { - // (undocumented) - body: ApplicationListResultOutput; - // (undocumented) - headers: RawHttpHeaders & ApplicationOperationsList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ApplicationOperationsListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ApplicationOperationsListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ApplicationOperationsListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ApplicationOperationsListHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ApplicationOperationsListHeaders; -} - -// @public (undocumented) -export interface ApplicationOperationsListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ApplicationOperationsListParameters = ApplicationOperationsListQueryParam & ApplicationOperationsListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ApplicationOperationsListQueryParam { - // (undocumented) - queryParameters?: ApplicationOperationsListQueryParamProperties; -} - -// @public (undocumented) -export interface ApplicationOperationsListQueryParamProperties { - maxresults?: number; - timeOut?: number; -} - -// @public -export interface ApplicationOutput { - displayName: string; - readonly id: string; - versions: string[]; -} - -// @public -export interface ApplicationPackageReference { - applicationId: string; - version?: string; -} - -// @public -export interface ApplicationPackageReferenceOutput { - applicationId: string; - version?: string; -} - -// @public -export interface AuthenticationTokenSettings { - access?: string[]; -} - -// @public -export interface AuthenticationTokenSettingsOutput { - access?: string[]; -} - -// @public -export interface AutoPoolSpecification { - autoPoolIdPrefix?: string; - keepAlive?: boolean; - pool?: PoolSpecification; - poolLifetimeOption: string; -} - -// @public -export interface AutoPoolSpecificationOutput { - autoPoolIdPrefix?: string; - keepAlive?: boolean; - pool?: PoolSpecificationOutput; - poolLifetimeOption: string; -} - -// @public -export interface AutoScaleRun { - error?: AutoScaleRunError; - results?: string; -} - -// @public -export interface AutoScaleRunError { - code?: string; - message?: string; - values?: Array; -} - -// @public -export interface AutoScaleRunErrorOutput { - code?: string; - message?: string; - values?: Array; -} - -// @public -export interface AutoScaleRunOutput { - error?: AutoScaleRunErrorOutput; - results?: string; - 
readonly timestamp: string; -} - -// @public -export interface AutoUserSpecification { - elevationLevel?: string; - scope?: string; -} - -// @public -export interface AutoUserSpecificationOutput { - elevationLevel?: string; - scope?: string; -} - -// @public -export interface AzureBlobFileSystemConfiguration { - accountKey?: string; - accountName: string; - blobfuseOptions?: string; - containerName: string; - identityReference?: ComputeNodeIdentityReference; - relativeMountPath: string; - sasKey?: string; -} - -// @public -export interface AzureBlobFileSystemConfigurationOutput { - accountKey?: string; - accountName: string; - blobfuseOptions?: string; - containerName: string; - identityReference?: ComputeNodeIdentityReferenceOutput; - relativeMountPath: string; - sasKey?: string; -} - -// @public -export interface AzureFileShareConfiguration { - accountKey: string; - accountName: string; - azureFileUrl: string; - mountOptions?: string; - relativeMountPath: string; -} - -// @public -export interface AzureFileShareConfigurationOutput { - accountKey: string; - accountName: string; - azureFileUrl: string; - mountOptions?: string; - relativeMountPath: string; -} - -// @public -export interface BatchErrorDetailOutput { - key?: string; - value?: string; -} - -// @public -export interface BatchErrorOutput { - code?: string; - message?: ErrorMessageOutput; - values?: Array; -} - -// @public -export interface BatchJob { - allowTaskPreemption?: boolean; - commonEnvironmentSettings?: Array; - constraints?: JobConstraints; - displayName?: string; - id?: string; - jobManagerTask?: JobManagerTask; - jobPreparationTask?: JobPreparationTask; - jobReleaseTask?: JobReleaseTask; - maxParallelTasks?: number; - metadata?: Array; - networkConfiguration?: JobNetworkConfiguration; - onAllTasksComplete?: string; - onTaskFailure?: string; - poolInfo?: PoolInformation; - priority?: number; - usesTaskDependencies?: boolean; -} - -// @public -export interface BatchJobDisableParameters { - disableTasks: string; -} - -// @public -export interface BatchJobListPreparationAndReleaseTaskStatusResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface BatchJobListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface BatchJobOutput { - allowTaskPreemption?: boolean; - commonEnvironmentSettings?: Array; - constraints?: JobConstraintsOutput; - readonly creationTime?: string; - displayName?: string; - readonly eTag?: string; - readonly executionInfo?: JobExecutionInformationOutput; - id?: string; - jobManagerTask?: JobManagerTaskOutput; - jobPreparationTask?: JobPreparationTaskOutput; - jobReleaseTask?: JobReleaseTaskOutput; - readonly lastModified?: string; - maxParallelTasks?: number; - metadata?: Array; - networkConfiguration?: JobNetworkConfigurationOutput; - onAllTasksComplete?: string; - onTaskFailure?: string; - poolInfo?: PoolInformationOutput; - readonly previousState?: string; - readonly previousStateTransitionTime?: string; - priority?: number; - readonly state?: string; - readonly stateTransitionTime?: string; - readonly stats?: JobStatisticsOutput; - readonly url?: string; - usesTaskDependencies?: boolean; -} - -// @public -export interface BatchJobSchedule { - displayName?: string; - id?: string; - jobSpecification?: JobSpecification; - metadata?: Array; - schedule?: Schedule; -} - -// @public -export interface BatchJobScheduleListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface 
BatchJobScheduleOutput { - readonly creationTime?: string; - displayName?: string; - readonly eTag?: string; - readonly executionInfo?: JobScheduleExecutionInformationOutput; - id?: string; - jobSpecification?: JobSpecificationOutput; - readonly lastModified?: string; - metadata?: Array; - readonly previousState?: string; - readonly previousStateTransitionTime?: string; - schedule?: ScheduleOutput; - readonly state?: string; - readonly stateTransitionTime?: string; - readonly stats?: JobScheduleStatisticsOutput; - readonly url?: string; -} - -// @public -export interface BatchJobTerminateParameters { - terminateReason?: string; -} - -// @public -export interface BatchPool { - applicationLicenses?: string[]; - applicationPackageReferences?: Array; - autoScaleEvaluationInterval?: string; - autoScaleFormula?: string; - certificateReferences?: Array; - cloudServiceConfiguration?: CloudServiceConfiguration; - displayName?: string; - enableAutoScale?: boolean; - enableInterNodeCommunication?: boolean; - id?: string; - metadata?: Array; - mountConfiguration?: Array; - networkConfiguration?: NetworkConfiguration; - resizeTimeout?: string; - startTask?: StartTask; - targetDedicatedNodes?: number; - targetLowPriorityNodes?: number; - targetNodeCommunicationMode?: string; - taskSchedulingPolicy?: TaskSchedulingPolicy; - taskSlotsPerNode?: number; - userAccounts?: Array; - virtualMachineConfiguration?: VirtualMachineConfiguration; - vmSize?: string; -} - -// @public -export interface BatchPoolEnableAutoScaleParameters { - autoScaleEvaluationInterval?: string; - autoScaleFormula?: string; -} - -// @public -export interface BatchPoolEvaluateAutoScaleParameters { - autoScaleFormula: string; -} - -// @public -export interface BatchPoolIdentity { - type: string; - userAssignedIdentities?: Array; -} - -// @public -export interface BatchPoolIdentityOutput { - type: string; - userAssignedIdentities?: Array; -} - -// @public -export interface BatchPoolListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface BatchPoolOutput { - readonly allocationState?: string; - readonly allocationStateTransitionTime?: string; - applicationLicenses?: string[]; - applicationPackageReferences?: Array; - autoScaleEvaluationInterval?: string; - autoScaleFormula?: string; - readonly autoScaleRun?: AutoScaleRunOutput; - certificateReferences?: Array; - cloudServiceConfiguration?: CloudServiceConfigurationOutput; - readonly creationTime?: string; - readonly currentDedicatedNodes?: number; - readonly currentLowPriorityNodes?: number; - readonly currentNodeCommunicationMode?: string; - displayName?: string; - enableAutoScale?: boolean; - enableInterNodeCommunication?: boolean; - readonly eTag?: string; - id?: string; - readonly identity?: BatchPoolIdentityOutput; - readonly lastModified?: string; - metadata?: Array; - mountConfiguration?: Array; - networkConfiguration?: NetworkConfigurationOutput; - readonly resizeErrors?: Array; - resizeTimeout?: string; - startTask?: StartTaskOutput; - readonly state?: string; - readonly stateTransitionTime?: string; - readonly stats?: PoolStatisticsOutput; - targetDedicatedNodes?: number; - targetLowPriorityNodes?: number; - targetNodeCommunicationMode?: string; - taskSchedulingPolicy?: TaskSchedulingPolicyOutput; - taskSlotsPerNode?: number; - readonly url?: string; - userAccounts?: Array; - virtualMachineConfiguration?: VirtualMachineConfigurationOutput; - vmSize?: string; -} - -// @public -export interface BatchPoolResizeParameters { - 
nodeDeallocationOption?: string; - resizeTimeout?: string; - targetDedicatedNodes?: number; - targetLowPriorityNodes?: number; -} - -// @public (undocumented) -export type BatchServiceClient = Client & { - path: Routes; -}; - -// @public -export interface BatchTask { - affinityInfo?: AffinityInformation; - applicationPackageReferences?: Array; - authenticationTokenSettings?: AuthenticationTokenSettings; - commandLine?: string; - constraints?: TaskConstraints; - containerSettings?: TaskContainerSettings; - dependsOn?: TaskDependencies; - displayName?: string; - environmentSettings?: Array; - exitConditions?: ExitConditions; - id?: string; - multiInstanceSettings?: MultiInstanceSettings; - outputFiles?: Array; - requiredSlots?: number; - resourceFiles?: Array; - userIdentity?: UserIdentity; -} - -// @public -export interface BatchTaskCollection { - value: Array; -} - -// @public -export interface BatchTaskListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface BatchTaskListSubtasksResultOutput { - value?: Array; -} - -// @public -export interface BatchTaskOutput { - affinityInfo?: AffinityInformationOutput; - applicationPackageReferences?: Array; - authenticationTokenSettings?: AuthenticationTokenSettingsOutput; - commandLine?: string; - constraints?: TaskConstraintsOutput; - containerSettings?: TaskContainerSettingsOutput; - readonly creationTime?: string; - dependsOn?: TaskDependenciesOutput; - displayName?: string; - environmentSettings?: Array; - readonly eTag?: string; - readonly executionInfo?: TaskExecutionInformationOutput; - exitConditions?: ExitConditionsOutput; - id?: string; - readonly lastModified?: string; - multiInstanceSettings?: MultiInstanceSettingsOutput; - readonly nodeInfo?: ComputeNodeInformationOutput; - outputFiles?: Array; - readonly previousState?: string; - readonly previousStateTransitionTime?: string; - requiredSlots?: number; - resourceFiles?: Array; - readonly state?: string; - readonly stateTransitionTime?: string; - readonly stats?: TaskStatisticsOutput; - readonly url?: string; - userIdentity?: UserIdentityOutput; -} - -// @public -export interface Certificate { - certificateFormat?: string; - data?: string; - password?: string; - thumbprint?: string; - thumbprintAlgorithm?: string; -} - -// @public -export interface CertificateListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public (undocumented) -export interface CertificateOperationsAdd { - get(options: CertificateOperationsListParameters): StreamableMethod; - post(options: CertificateOperationsAddParameters): StreamableMethod; -} - -// @public (undocumented) -export interface CertificateOperationsAdd201Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface CertificateOperationsAdd201Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsAdd201Headers; - // (undocumented) - status: "201"; -} - -// @public (undocumented) -export interface CertificateOperationsAddBodyParam { - body: Certificate; -} - -// @public (undocumented) -export interface CertificateOperationsAddDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface CertificateOperationsAddDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsAddDefaultHeaders; - // (undocumented) - status: string; -} - -// 
@public (undocumented) -export interface CertificateOperationsAddHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & CertificateOperationsAddHeaders; -} - -// @public (undocumented) -export interface CertificateOperationsAddHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type CertificateOperationsAddParameters = CertificateOperationsAddQueryParam & CertificateOperationsAddHeaderParam & CertificateOperationsAddBodyParam & RequestParameters; - -// @public (undocumented) -export interface CertificateOperationsAddQueryParam { - // (undocumented) - queryParameters?: CertificateOperationsAddQueryParamProperties; -} - -// @public (undocumented) -export interface CertificateOperationsAddQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface CertificateOperationsCancelDeletion { - post(options?: CertificateOperationsCancelDeletionParameters): StreamableMethod; -} - -// @public (undocumented) -export interface CertificateOperationsCancelDeletion204Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface CertificateOperationsCancelDeletion204Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsCancelDeletion204Headers; - // (undocumented) - status: "204"; -} - -// @public (undocumented) -export interface CertificateOperationsCancelDeletionDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface CertificateOperationsCancelDeletionDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsCancelDeletionDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface CertificateOperationsCancelDeletionHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & CertificateOperationsCancelDeletionHeaders; -} - -// @public (undocumented) -export interface CertificateOperationsCancelDeletionHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type CertificateOperationsCancelDeletionParameters = CertificateOperationsCancelDeletionQueryParam & CertificateOperationsCancelDeletionHeaderParam & RequestParameters; - -// @public (undocumented) -export interface CertificateOperationsCancelDeletionQueryParam { - // (undocumented) - queryParameters?: CertificateOperationsCancelDeletionQueryParamProperties; -} - -// @public (undocumented) -export interface CertificateOperationsCancelDeletionQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface CertificateOperationsDelete { - delete(options?: CertificateOperationsDeleteParameters): StreamableMethod; - get(options: CertificateOperationsGetParameters): StreamableMethod; -} - -// @public (undocumented) -export interface CertificateOperationsDelete202Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface CertificateOperationsDelete202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsDelete202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface 
CertificateOperationsDeleteDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface CertificateOperationsDeleteDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsDeleteDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface CertificateOperationsDeleteHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & CertificateOperationsDeleteHeaders; -} - -// @public (undocumented) -export interface CertificateOperationsDeleteHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type CertificateOperationsDeleteParameters = CertificateOperationsDeleteQueryParam & CertificateOperationsDeleteHeaderParam & RequestParameters; - -// @public (undocumented) -export interface CertificateOperationsDeleteQueryParam { - // (undocumented) - queryParameters?: CertificateOperationsDeleteQueryParamProperties; -} - -// @public (undocumented) -export interface CertificateOperationsDeleteQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface CertificateOperationsGet200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface CertificateOperationsGet200Response extends HttpResponse { - // (undocumented) - body: CertificateOutput; - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsGet200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface CertificateOperationsGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface CertificateOperationsGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface CertificateOperationsGetHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & CertificateOperationsGetHeaders; -} - -// @public (undocumented) -export interface CertificateOperationsGetHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type CertificateOperationsGetParameters = CertificateOperationsGetQueryParam & CertificateOperationsGetHeaderParam & RequestParameters; - -// @public (undocumented) -export interface CertificateOperationsGetQueryParam { - // (undocumented) - queryParameters: CertificateOperationsGetQueryParamProperties; -} - -// @public (undocumented) -export interface CertificateOperationsGetQueryParamProperties { - $select: string; - timeOut?: number; -} - -// @public (undocumented) -export interface CertificateOperationsList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface CertificateOperationsList200Response extends HttpResponse { - // (undocumented) - body: CertificateListResultOutput; - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface CertificateOperationsListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface 
CertificateOperationsListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & CertificateOperationsListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface CertificateOperationsListHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & CertificateOperationsListHeaders; -} - -// @public (undocumented) -export interface CertificateOperationsListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type CertificateOperationsListParameters = CertificateOperationsListQueryParam & CertificateOperationsListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface CertificateOperationsListQueryParam { - // (undocumented) - queryParameters: CertificateOperationsListQueryParamProperties; -} - -// @public (undocumented) -export interface CertificateOperationsListQueryParamProperties { - $filter: string; - $select: string; - maxresults?: number; - timeOut?: number; -} - -// @public -export interface CertificateOutput { - certificateFormat?: string; - data?: string; - readonly deleteCertificateError?: DeleteCertificateErrorOutput; - password?: string; - readonly previousState?: string; - readonly previousStateTransitionTime?: string; - readonly publicData?: string; - readonly state?: string; - readonly stateTransitionTime?: string; - thumbprint?: string; - thumbprintAlgorithm?: string; - readonly url?: string; -} - -// @public -export interface CertificateReference { - storeLocation?: string; - storeName?: string; - thumbprint: string; - thumbprintAlgorithm: string; - visibility?: string[]; -} - -// @public -export interface CertificateReferenceOutput { - storeLocation?: string; - storeName?: string; - thumbprint: string; - thumbprintAlgorithm: string; - visibility?: string[]; -} - -// @public -export interface CifsMountConfiguration { - mountOptions?: string; - password: string; - relativeMountPath: string; - source: string; - username: string; -} - -// @public -export interface CifsMountConfigurationOutput { - mountOptions?: string; - password: string; - relativeMountPath: string; - source: string; - username: string; -} - -// @public -export interface CloudServiceConfiguration { - osFamily: string; - osVersion?: string; -} - -// @public -export interface CloudServiceConfigurationOutput { - osFamily: string; - osVersion?: string; -} - -// @public -export interface ComputeNodeEndpointConfigurationOutput { - inboundEndpoints: Array; -} - -// @public -export interface ComputeNodeErrorOutput { - code?: string; - errorDetails?: Array; - message?: string; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsGet { - get(options?: ComputeNodeExtensionOperationsGetParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsGet200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface ComputeNodeExtensionOperationsGet200Response extends HttpResponse { - // (undocumented) - body: NodeVMExtensionOutput; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeExtensionOperationsGet200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export 
interface ComputeNodeExtensionOperationsGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeExtensionOperationsGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsGetHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeExtensionOperationsGetHeaders; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsGetHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeExtensionOperationsGetParameters = ComputeNodeExtensionOperationsGetQueryParam & ComputeNodeExtensionOperationsGetHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsGetQueryParam { - // (undocumented) - queryParameters?: ComputeNodeExtensionOperationsGetQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsGetQueryParamProperties { - $select?: string; - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsList { - get(options?: ComputeNodeExtensionOperationsListParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface ComputeNodeExtensionOperationsList200Response extends HttpResponse { - // (undocumented) - body: NodeVMExtensionListOutput; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeExtensionOperationsList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeExtensionOperationsListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsListHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeExtensionOperationsListHeaders; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeExtensionOperationsListParameters = ComputeNodeExtensionOperationsListQueryParam & ComputeNodeExtensionOperationsListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsListQueryParam { - // (undocumented) - queryParameters?: ComputeNodeExtensionOperationsListQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeExtensionOperationsListQueryParamProperties { - $select?: string; - maxresults?: number; - timeOut?: number; -} - -// @public -export interface ComputeNodeGetRemoteLoginSettingsResultOutput { - readonly remoteLoginIPAddress: string; - remoteLoginPort: number; -} - -// @public -export interface ComputeNodeIdentityReference { - resourceId?: string; -} - -// @public -export interface ComputeNodeIdentityReferenceOutput { - resourceId?: string; 
-} - -// @public -export interface ComputeNodeInformation { - affinityId?: string; - nodeId?: string; - nodeUrl?: string; - poolId?: string; - taskRootDirectory?: string; - taskRootDirectoryUrl?: string; -} - -// @public -export interface ComputeNodeInformationOutput { - affinityId?: string; - nodeId?: string; - nodeUrl?: string; - poolId?: string; - taskRootDirectory?: string; - taskRootDirectoryUrl?: string; -} - -// @public -export interface ComputeNodeListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUser { - post(options: ComputeNodeOperationsAddUserParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUser201Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsAddUser201Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsAddUser201Headers; - // (undocumented) - status: "201"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUserBodyParam { - body: ComputeNodeUser; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUserDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUserDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsAddUserDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUserHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsAddUserHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUserHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsAddUserParameters = ComputeNodeOperationsAddUserQueryParam & ComputeNodeOperationsAddUserHeaderParam & ComputeNodeOperationsAddUserBodyParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsAddUserQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsAddUserQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsAddUserQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUser { - delete(options?: ComputeNodeOperationsDeleteUserParameters): StreamableMethod; - put(options: ComputeNodeOperationsUpdateUserParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUser200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface ComputeNodeOperationsDeleteUser200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsDeleteUser200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUserDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUserDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsDeleteUserDefaultHeaders; - 
// (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUserHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsDeleteUserHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUserHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsDeleteUserParameters = ComputeNodeOperationsDeleteUserQueryParam & ComputeNodeOperationsDeleteUserHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUserQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsDeleteUserQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDeleteUserQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableScheduling { - post(options: ComputeNodeOperationsDisableSchedulingParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableScheduling200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsDisableScheduling200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsDisableScheduling200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableSchedulingBodyParam { - body: NodeDisableSchedulingParameters; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableSchedulingDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableSchedulingDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsDisableSchedulingDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableSchedulingHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsDisableSchedulingHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableSchedulingHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsDisableSchedulingParameters = ComputeNodeOperationsDisableSchedulingQueryParam & ComputeNodeOperationsDisableSchedulingHeaderParam & ComputeNodeOperationsDisableSchedulingBodyParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsDisableSchedulingQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsDisableSchedulingQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsDisableSchedulingQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsEnableScheduling { - post(options?: ComputeNodeOperationsEnableSchedulingParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsEnableScheduling200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// 
@public -export interface ComputeNodeOperationsEnableScheduling200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsEnableScheduling200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsEnableSchedulingDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsEnableSchedulingDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsEnableSchedulingDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsEnableSchedulingHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsEnableSchedulingHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsEnableSchedulingHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsEnableSchedulingParameters = ComputeNodeOperationsEnableSchedulingQueryParam & ComputeNodeOperationsEnableSchedulingHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsEnableSchedulingQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsEnableSchedulingQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsEnableSchedulingQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGet { - get(options?: ComputeNodeOperationsGetParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGet200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsGet200Response extends HttpResponse { - // (undocumented) - body: ComputeNodeOutput; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsGet200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsGetHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsGetParameters = ComputeNodeOperationsGetQueryParam & ComputeNodeOperationsGetHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsGetQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsGetQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetQueryParamProperties { - $select?: string; - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktop { - get(options?: 
ComputeNodeOperationsGetRemoteDesktopParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktop200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsGetRemoteDesktop200Response extends HttpResponse { - body: Uint8Array; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsGetRemoteDesktop200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktopDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktopDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsGetRemoteDesktopDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktopHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsGetRemoteDesktopHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktopHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsGetRemoteDesktopParameters = ComputeNodeOperationsGetRemoteDesktopQueryParam & ComputeNodeOperationsGetRemoteDesktopHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktopQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsGetRemoteDesktopQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteDesktopQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettings { - get(options?: ComputeNodeOperationsGetRemoteLoginSettingsParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettings200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsGetRemoteLoginSettings200Response extends HttpResponse { - // (undocumented) - body: ComputeNodeGetRemoteLoginSettingsResultOutput; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsGetRemoteLoginSettings200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettingsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsGetRemoteLoginSettingsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettingsHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsGetRemoteLoginSettingsHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettingsHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type 
ComputeNodeOperationsGetRemoteLoginSettingsParameters = ComputeNodeOperationsGetRemoteLoginSettingsQueryParam & ComputeNodeOperationsGetRemoteLoginSettingsHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettingsQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsGetRemoteLoginSettingsQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsGetRemoteLoginSettingsQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsList { - get(options: ComputeNodeOperationsListParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsList200Response extends HttpResponse { - // (undocumented) - body: ComputeNodeListResultOutput; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsListHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsListHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsListParameters = ComputeNodeOperationsListQueryParam & ComputeNodeOperationsListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsListQueryParam { - // (undocumented) - queryParameters: ComputeNodeOperationsListQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsListQueryParamProperties { - $filter: string; - $select?: string; - maxresults?: number; - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReboot { - post(options: ComputeNodeOperationsRebootParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReboot202Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsReboot202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsReboot202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsRebootBodyParam { - body: NodeRebootParameters; -} - -// @public (undocumented) -export interface ComputeNodeOperationsRebootDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsRebootDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsRebootDefaultHeaders; - // (undocumented) - status: 
string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsRebootHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsRebootHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsRebootHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsRebootParameters = ComputeNodeOperationsRebootQueryParam & ComputeNodeOperationsRebootHeaderParam & ComputeNodeOperationsRebootBodyParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsRebootQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsRebootQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsRebootQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimage { - post(options: ComputeNodeOperationsReimageParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimage202Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsReimage202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsReimage202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimageBodyParam { - body: NodeReimageParameters; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimageDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimageDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsReimageDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimageHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsReimageHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimageHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsReimageParameters = ComputeNodeOperationsReimageQueryParam & ComputeNodeOperationsReimageHeaderParam & ComputeNodeOperationsReimageBodyParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsReimageQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsReimageQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsReimageQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUpdateUser200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface ComputeNodeOperationsUpdateUser200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsUpdateUser200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUpdateUserBodyParam { - body: NodeUpdateUserParameters; -} - -// @public (undocumented) -export interface 
ComputeNodeOperationsUpdateUserDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUpdateUserDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsUpdateUserDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUpdateUserHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsUpdateUserHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUpdateUserHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsUpdateUserParameters = ComputeNodeOperationsUpdateUserQueryParam & ComputeNodeOperationsUpdateUserHeaderParam & ComputeNodeOperationsUpdateUserBodyParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsUpdateUserQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsUpdateUserQueryParamProperties; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUpdateUserQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogs { - post(options: ComputeNodeOperationsUploadBatchServiceLogsParameters): StreamableMethod; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogs200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface ComputeNodeOperationsUploadBatchServiceLogs200Response extends HttpResponse { - // (undocumented) - body: UploadBatchServiceLogsResultOutput; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsUploadBatchServiceLogs200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogsBodyParam { - body: UploadBatchServiceLogsConfiguration; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & ComputeNodeOperationsUploadBatchServiceLogsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogsHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & ComputeNodeOperationsUploadBatchServiceLogsHeaders; -} - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogsHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type ComputeNodeOperationsUploadBatchServiceLogsParameters = ComputeNodeOperationsUploadBatchServiceLogsQueryParam & ComputeNodeOperationsUploadBatchServiceLogsHeaderParam & ComputeNodeOperationsUploadBatchServiceLogsBodyParam & RequestParameters; - -// @public (undocumented) -export interface ComputeNodeOperationsUploadBatchServiceLogsQueryParam { - // (undocumented) - queryParameters?: ComputeNodeOperationsUploadBatchServiceLogsQueryParamProperties; -} - -// @public (undocumented) -export interface 
ComputeNodeOperationsUploadBatchServiceLogsQueryParamProperties { - timeOut?: number; -} - -// @public -export interface ComputeNodeOutput { - affinityId?: string; - allocationTime?: string; - certificateReferences?: Array; - endpointConfiguration?: ComputeNodeEndpointConfigurationOutput; - errors?: Array; - id?: string; - ipAddress?: string; - isDedicated?: boolean; - lastBootTime?: string; - nodeAgentInfo?: NodeAgentInformationOutput; - recentTasks?: Array; - runningTasksCount?: number; - runningTaskSlotsCount?: number; - schedulingState?: string; - startTask?: StartTaskOutput; - startTaskInfo?: StartTaskInformationOutput; - state?: string; - stateTransitionTime?: string; - totalTasksRun?: number; - totalTasksSucceeded?: number; - url?: string; - virtualMachineInfo?: VirtualMachineInfoOutput; - vmSize?: string; -} - -// @public -export interface ComputeNodeUser { - expiryTime?: Date | string; - isAdmin?: boolean; - name: string; - password?: string; - sshPublicKey?: string; -} - -// @public -export interface ContainerConfiguration { - containerImageNames?: string[]; - containerRegistries?: Array; - type: string; -} - -// @public -export interface ContainerConfigurationOutput { - containerImageNames?: string[]; - containerRegistries?: Array; - type: string; -} - -// @public -export interface ContainerRegistry { - identityReference?: ComputeNodeIdentityReference; - password?: string; - registryServer?: string; - username?: string; -} - -// @public -export interface ContainerRegistryOutput { - identityReference?: ComputeNodeIdentityReferenceOutput; - password?: string; - registryServer?: string; - username?: string; -} - -// @public -function createClient(endpoint: string, credentials: TokenCredential, options?: ClientOptions): BatchServiceClient; -export default createClient; - -// @public -export interface DataDisk { - caching?: string; - diskSizeGB: number; - lun: number; - storageAccountType?: string; -} - -// @public -export interface DataDiskOutput { - caching?: string; - diskSizeGB: number; - lun: number; - storageAccountType?: string; -} - -// @public -export interface DeleteCertificateError { - code?: string; - message?: string; - values?: Array; -} - -// @public -export interface DeleteCertificateErrorOutput { - code?: string; - message?: string; - values?: Array; -} - -// @public -export interface DiffDiskSettings { - placement?: string; -} - -// @public -export interface DiffDiskSettingsOutput { - placement?: string; -} - -// @public -export interface DiskEncryptionConfiguration { - targets?: string[]; -} - -// @public -export interface DiskEncryptionConfigurationOutput { - targets?: string[]; -} - -// @public -export interface EnvironmentSetting { - name: string; - value?: string; -} - -// @public -export interface EnvironmentSettingOutput { - name: string; - value?: string; -} - -// @public -export interface ErrorMessageOutput { - lang?: string; - value?: string; -} - -// @public -export interface ExitCodeMapping { - code: number; - exitOptions: ExitOptions; -} - -// @public -export interface ExitCodeMappingOutput { - code: number; - exitOptions: ExitOptionsOutput; -} - -// @public -export interface ExitCodeRangeMapping { - end: number; - exitOptions: ExitOptions; - start: number; -} - -// @public -export interface ExitCodeRangeMappingOutput { - end: number; - exitOptions: ExitOptionsOutput; - start: number; -} - -// @public -export interface ExitConditions { - default?: ExitOptions; - exitCodeRanges?: Array; - exitCodes?: Array; - fileUploadError?: ExitOptions; - 
preProcessingError?: ExitOptions; -} - -// @public -export interface ExitConditionsOutput { - default?: ExitOptionsOutput; - exitCodeRanges?: Array; - exitCodes?: Array; - fileUploadError?: ExitOptionsOutput; - preProcessingError?: ExitOptionsOutput; -} - -// @public -export interface ExitOptions { - dependencyAction?: string; - jobAction?: string; -} - -// @public -export interface ExitOptionsOutput { - dependencyAction?: string; - jobAction?: string; -} - -// @public (undocumented) -export interface FileDeleteFromComputeNode { - delete(options?: FileDeleteFromComputeNodeParameters): StreamableMethod; - get(options?: FileGetFromComputeNodeParameters): StreamableMethod; - head(options?: FileGetPropertiesFromComputeNodeParameters): StreamableMethod; -} - -// @public (undocumented) -export interface FileDeleteFromComputeNode200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface FileDeleteFromComputeNode200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & FileDeleteFromComputeNode200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileDeleteFromComputeNodeDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface FileDeleteFromComputeNodeDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & FileDeleteFromComputeNodeDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileDeleteFromComputeNodeHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & FileDeleteFromComputeNodeHeaders; -} - -// @public (undocumented) -export interface FileDeleteFromComputeNodeHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileDeleteFromComputeNodeParameters = FileDeleteFromComputeNodeQueryParam & FileDeleteFromComputeNodeHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileDeleteFromComputeNodeQueryParam { - // (undocumented) - queryParameters?: FileDeleteFromComputeNodeQueryParamProperties; -} - -// @public (undocumented) -export interface FileDeleteFromComputeNodeQueryParamProperties { - recursive?: boolean; - timeOut?: number; -} - -// @public (undocumented) -export interface FileDeleteFromTask { - delete(options: FileDeleteFromTaskParameters): StreamableMethod; - get(options?: FileGetFromTaskParameters): StreamableMethod; - head(options?: FileGetPropertiesFromTaskParameters): StreamableMethod; -} - -// @public (undocumented) -export interface FileDeleteFromTask200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface FileDeleteFromTask200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & FileDeleteFromTask200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileDeleteFromTaskDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface FileDeleteFromTaskDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & FileDeleteFromTaskDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileDeleteFromTaskHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & FileDeleteFromTaskHeaders; -} - -// @public 
(undocumented) -export interface FileDeleteFromTaskHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileDeleteFromTaskParameters = FileDeleteFromTaskQueryParam & FileDeleteFromTaskHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileDeleteFromTaskQueryParam { - // (undocumented) - queryParameters: FileDeleteFromTaskQueryParamProperties; -} - -// @public (undocumented) -export interface FileDeleteFromTaskQueryParamProperties { - recursive: boolean; - timeOut?: number; -} - -// @public (undocumented) -export interface FileGetFromComputeNode200Headers { - "client-request-id"?: string; - "content-length": number; - "last-modified"?: string; - "ocp-batch-file-isdirectory": boolean; - "ocp-batch-file-mode": string; - "ocp-batch-file-url": string; - "ocp-creation-time"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface FileGetFromComputeNode200Response extends HttpResponse { - body: Uint8Array; - // (undocumented) - headers: RawHttpHeaders & FileGetFromComputeNode200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileGetFromComputeNodeDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface FileGetFromComputeNodeDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & FileGetFromComputeNodeDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileGetFromComputeNodeHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & FileGetFromComputeNodeHeaders; -} - -// @public (undocumented) -export interface FileGetFromComputeNodeHeaders { - "client-request-id"?: string; - "if-modified-since"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "ocp-range"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileGetFromComputeNodeParameters = FileGetFromComputeNodeQueryParam & FileGetFromComputeNodeHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileGetFromComputeNodeQueryParam { - // (undocumented) - queryParameters?: FileGetFromComputeNodeQueryParamProperties; -} - -// @public (undocumented) -export interface FileGetFromComputeNodeQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface FileGetFromTask200Headers { - "client-request-id"?: string; - "content-length": number; - "last-modified"?: string; - "ocp-batch-file-isdirectory": boolean; - "ocp-batch-file-mode": string; - "ocp-batch-file-url": string; - "ocp-creation-time"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface FileGetFromTask200Response extends HttpResponse { - body: Uint8Array; - // (undocumented) - headers: RawHttpHeaders & FileGetFromTask200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileGetFromTaskDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface FileGetFromTaskDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & FileGetFromTaskDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileGetFromTaskHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & 
FileGetFromTaskHeaders; -} - -// @public (undocumented) -export interface FileGetFromTaskHeaders { - "client-request-id"?: string; - "if-modified-since"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "ocp-range"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileGetFromTaskParameters = FileGetFromTaskQueryParam & FileGetFromTaskHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileGetFromTaskQueryParam { - // (undocumented) - queryParameters?: FileGetFromTaskQueryParamProperties; -} - -// @public (undocumented) -export interface FileGetFromTaskQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface FileGetPropertiesFromComputeNode200Headers { - "client-request-id"?: string; - "content-length": number; - "last-modified"?: string; - "ocp-batch-file-isdirectory": boolean; - "ocp-batch-file-mode": string; - "ocp-batch-file-url": string; - "ocp-creation-time"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface FileGetPropertiesFromComputeNode200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & FileGetPropertiesFromComputeNode200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileGetPropertiesFromComputeNodeDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface FileGetPropertiesFromComputeNodeDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & FileGetPropertiesFromComputeNodeDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileGetPropertiesFromComputeNodeHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & FileGetPropertiesFromComputeNodeHeaders; -} - -// @public (undocumented) -export interface FileGetPropertiesFromComputeNodeHeaders { - "client-request-id"?: string; - "if-modified-since"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileGetPropertiesFromComputeNodeParameters = FileGetPropertiesFromComputeNodeQueryParam & FileGetPropertiesFromComputeNodeHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileGetPropertiesFromComputeNodeQueryParam { - // (undocumented) - queryParameters?: FileGetPropertiesFromComputeNodeQueryParamProperties; -} - -// @public (undocumented) -export interface FileGetPropertiesFromComputeNodeQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface FileGetPropertiesFromTask200Headers { - "client-request-id"?: string; - "content-length": number; - "last-modified"?: string; - "ocp-batch-file-isdirectory": boolean; - "ocp-batch-file-mode": string; - "ocp-batch-file-url": string; - "ocp-creation-time"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface FileGetPropertiesFromTask200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & FileGetPropertiesFromTask200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileGetPropertiesFromTaskDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface FileGetPropertiesFromTaskDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: 
RawHttpHeaders & FileGetPropertiesFromTaskDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileGetPropertiesFromTaskHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & FileGetPropertiesFromTaskHeaders; -} - -// @public (undocumented) -export interface FileGetPropertiesFromTaskHeaders { - "client-request-id"?: string; - "if-modified-since"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileGetPropertiesFromTaskParameters = FileGetPropertiesFromTaskQueryParam & FileGetPropertiesFromTaskHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileGetPropertiesFromTaskQueryParam { - // (undocumented) - queryParameters?: FileGetPropertiesFromTaskQueryParamProperties; -} - -// @public (undocumented) -export interface FileGetPropertiesFromTaskQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface FileListFromComputeNode { - get(options: FileListFromComputeNodeParameters): StreamableMethod; -} - -// @public (undocumented) -export interface FileListFromComputeNode200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface FileListFromComputeNode200Response extends HttpResponse { - // (undocumented) - body: NodeFileListResultOutput; - // (undocumented) - headers: RawHttpHeaders & FileListFromComputeNode200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileListFromComputeNodeDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface FileListFromComputeNodeDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & FileListFromComputeNodeDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileListFromComputeNodeHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & FileListFromComputeNodeHeaders; -} - -// @public (undocumented) -export interface FileListFromComputeNodeHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileListFromComputeNodeParameters = FileListFromComputeNodeQueryParam & FileListFromComputeNodeHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileListFromComputeNodeQueryParam { - // (undocumented) - queryParameters: FileListFromComputeNodeQueryParamProperties; -} - -// @public (undocumented) -export interface FileListFromComputeNodeQueryParamProperties { - $filter: string; - maxresults?: number; - recursive: boolean; - timeOut?: number; -} - -// @public (undocumented) -export interface FileListFromTask { - get(options: FileListFromTaskParameters): StreamableMethod; -} - -// @public (undocumented) -export interface FileListFromTask200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface FileListFromTask200Response extends HttpResponse { - // (undocumented) - body: NodeFileListResultOutput; - // (undocumented) - headers: RawHttpHeaders & FileListFromTask200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface FileListFromTaskDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public 
(undocumented) -export interface FileListFromTaskDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & FileListFromTaskDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface FileListFromTaskHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & FileListFromTaskHeaders; -} - -// @public (undocumented) -export interface FileListFromTaskHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type FileListFromTaskParameters = FileListFromTaskQueryParam & FileListFromTaskHeaderParam & RequestParameters; - -// @public (undocumented) -export interface FileListFromTaskQueryParam { - // (undocumented) - queryParameters: FileListFromTaskQueryParamProperties; -} - -// @public (undocumented) -export interface FileListFromTaskQueryParamProperties { - $filter: string; - maxresults?: number; - recursive: boolean; - timeOut?: number; -} - -// @public -export interface FilePropertiesOutput { - contentLength: number; - contentType?: string; - creationTime?: string; - fileMode?: string; - lastModified: string; -} - -// @public -export type GetArrayType = T extends Array ? TData : never; - -// @public -export type GetPage = (pageLink: string, maxPageSize?: number) => Promise<{ - page: TPage; - nextPageLink?: string; -}>; - -// @public -export interface HttpHeader { - name: string; - value?: string; -} - -// @public -export interface HttpHeaderOutput { - name: string; - value?: string; -} - -// @public -export interface ImageInformationOutput { - batchSupportEndOfLife?: string; - capabilities?: string[]; - imageReference: ImageReferenceOutput; - readonly nodeAgentSKUId: string; - osType: string; - verificationType: string; -} - -// @public -export interface ImageReference { - offer?: string; - publisher?: string; - sku?: string; - version?: string; - virtualMachineImageId?: string; -} - -// @public -export interface ImageReferenceOutput { - readonly exactVersion?: string; - offer?: string; - publisher?: string; - sku?: string; - version?: string; - virtualMachineImageId?: string; -} - -// @public -export interface InboundEndpointOutput { - backendPort: number; - frontendPort: number; - name: string; - protocol: string; - publicFQDN: string; - publicIPAddress: string; -} - -// @public -export interface InboundNATPool { - backendPort: number; - frontendPortRangeEnd: number; - frontendPortRangeStart: number; - name: string; - networkSecurityGroupRules?: Array; - protocol: string; -} - -// @public -export interface InboundNATPoolOutput { - backendPort: number; - frontendPortRangeEnd: number; - frontendPortRangeStart: number; - name: string; - networkSecurityGroupRules?: Array; - protocol: string; -} - -// @public -export interface InstanceViewStatusOutput { - code?: string; - displayStatus?: string; - level?: string; - message?: string; - time?: string; -} - -// @public (undocumented) -export function isUnexpected(response: ApplicationOperationsList200Response | ApplicationOperationsListDefaultResponse): response is ApplicationOperationsListDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ApplicationOperationsGet200Response | ApplicationOperationsGetDefaultResponse): response is ApplicationOperationsGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolListUsageMetrics200Response | 
PoolListUsageMetricsDefaultResponse): response is PoolListUsageMetricsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolGetAllLifetimeStatistics200Response | PoolGetAllLifetimeStatisticsDefaultResponse): response is PoolGetAllLifetimeStatisticsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolAdd201Response | PoolAddDefaultResponse): response is PoolAddDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolList200Response | PoolListDefaultResponse): response is PoolListDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolDelete202Response | PoolDeleteDefaultResponse): response is PoolDeleteDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolExists404Response | PoolExistsDefaultResponse): response is PoolExistsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolGet200Response | PoolGetDefaultResponse): response is PoolGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolPatch200Response | PoolPatchDefaultResponse): response is PoolPatchDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolDisableAutoScale200Response | PoolDisableAutoScaleDefaultResponse): response is PoolDisableAutoScaleDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolEnableAutoScale200Response | PoolEnableAutoScaleDefaultResponse): response is PoolEnableAutoScaleDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolEvaluateAutoScale200Response | PoolEvaluateAutoScaleDefaultResponse): response is PoolEvaluateAutoScaleDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolResize200Response | PoolResizeDefaultResponse): response is PoolResizeDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolStopResize200Response | PoolStopResizeDefaultResponse): response is PoolStopResizeDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolUpdateProperties200Response | PoolUpdatePropertiesDefaultResponse): response is PoolUpdatePropertiesDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: PoolRemoveNodes200Response | PoolRemoveNodesDefaultResponse): response is PoolRemoveNodesDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: AccountListSupportedImages200Response | AccountListSupportedImagesDefaultResponse): response is AccountListSupportedImagesDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: AccountListPoolNodeCounts200Response | AccountListPoolNodeCountsDefaultResponse): response is AccountListPoolNodeCountsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobGetAllLifetimeStatistics200Response | JobGetAllLifetimeStatisticsDefaultResponse): response is JobGetAllLifetimeStatisticsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobDelete202Response | JobDeleteDefaultResponse): response is JobDeleteDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobGet200Response | JobGetDefaultResponse): response is JobGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobPatch200Response | JobPatchDefaultResponse): 
response is JobPatchDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobUpdate200Response | JobUpdateDefaultResponse): response is JobUpdateDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobDisable202Response | JobDisableDefaultResponse): response is JobDisableDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobEnable202Response | JobEnableDefaultResponse): response is JobEnableDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobTerminate202Response | JobTerminateDefaultResponse): response is JobTerminateDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobAdd201Response | JobAddDefaultResponse): response is JobAddDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobList200Response | JobListDefaultResponse): response is JobListDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobListFromJobSchedule200Response | JobListFromJobScheduleDefaultResponse): response is JobListFromJobScheduleDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobListPreparationAndReleaseTaskStatus200Response | JobListPreparationAndReleaseTaskStatusDefaultResponse): response is JobListPreparationAndReleaseTaskStatusDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobGetTaskCounts200Response | JobGetTaskCountsDefaultResponse): response is JobGetTaskCountsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: CertificateOperationsAdd201Response | CertificateOperationsAddDefaultResponse): response is CertificateOperationsAddDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: CertificateOperationsList200Response | CertificateOperationsListDefaultResponse): response is CertificateOperationsListDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: CertificateOperationsCancelDeletion204Response | CertificateOperationsCancelDeletionDefaultResponse): response is CertificateOperationsCancelDeletionDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: CertificateOperationsDelete202Response | CertificateOperationsDeleteDefaultResponse): response is CertificateOperationsDeleteDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: CertificateOperationsGet200Response | CertificateOperationsGetDefaultResponse): response is CertificateOperationsGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileDeleteFromTask200Response | FileDeleteFromTaskDefaultResponse): response is FileDeleteFromTaskDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileGetFromTask200Response | FileGetFromTaskDefaultResponse): response is FileGetFromTaskDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileGetPropertiesFromTask200Response | FileGetPropertiesFromTaskDefaultResponse): response is FileGetPropertiesFromTaskDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileDeleteFromComputeNode200Response | FileDeleteFromComputeNodeDefaultResponse): response is FileDeleteFromComputeNodeDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileGetFromComputeNode200Response | 
FileGetFromComputeNodeDefaultResponse): response is FileGetFromComputeNodeDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileGetPropertiesFromComputeNode200Response | FileGetPropertiesFromComputeNodeDefaultResponse): response is FileGetPropertiesFromComputeNodeDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileListFromTask200Response | FileListFromTaskDefaultResponse): response is FileListFromTaskDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: FileListFromComputeNode200Response | FileListFromComputeNodeDefaultResponse): response is FileListFromComputeNodeDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleExists200Response | JobScheduleExists204Response | JobScheduleExistsDefaultResponse): response is JobScheduleExistsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleDelete202Response | JobScheduleDeleteDefaultResponse): response is JobScheduleDeleteDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleGet200Response | JobScheduleGetDefaultResponse): response is JobScheduleGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobSchedulePatch200Response | JobSchedulePatchDefaultResponse): response is JobSchedulePatchDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleUpdate200Response | JobScheduleUpdateDefaultResponse): response is JobScheduleUpdateDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleDisable204Response | JobScheduleDisableDefaultResponse): response is JobScheduleDisableDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleEnable204Response | JobScheduleEnableDefaultResponse): response is JobScheduleEnableDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleTerminate202Response | JobScheduleTerminateDefaultResponse): response is JobScheduleTerminateDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleAdd201Response | JobScheduleAddDefaultResponse): response is JobScheduleAddDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: JobScheduleList200Response | JobScheduleListDefaultResponse): response is JobScheduleListDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskAdd201Response | TaskAddDefaultResponse): response is TaskAddDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskList200Response | TaskListDefaultResponse): response is TaskListDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskAddCollection200Response | TaskAddCollectionDefaultResponse): response is TaskAddCollectionDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskDelete200Response | TaskDeleteDefaultResponse): response is TaskDeleteDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskGet200Response | TaskGetDefaultResponse): response is TaskGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskUpdate200Response | TaskUpdateDefaultResponse): response is TaskUpdateDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: 
TaskListSubtasks200Response | TaskListSubtasksDefaultResponse): response is TaskListSubtasksDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskTerminate204Response | TaskTerminateDefaultResponse): response is TaskTerminateDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: TaskReactivate204Response | TaskReactivateDefaultResponse): response is TaskReactivateDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsAddUser201Response | ComputeNodeOperationsAddUserDefaultResponse): response is ComputeNodeOperationsAddUserDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsDeleteUser200Response | ComputeNodeOperationsDeleteUserDefaultResponse): response is ComputeNodeOperationsDeleteUserDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsUpdateUser200Response | ComputeNodeOperationsUpdateUserDefaultResponse): response is ComputeNodeOperationsUpdateUserDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsGet200Response | ComputeNodeOperationsGetDefaultResponse): response is ComputeNodeOperationsGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsReboot202Response | ComputeNodeOperationsRebootDefaultResponse): response is ComputeNodeOperationsRebootDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsReimage202Response | ComputeNodeOperationsReimageDefaultResponse): response is ComputeNodeOperationsReimageDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsDisableScheduling200Response | ComputeNodeOperationsDisableSchedulingDefaultResponse): response is ComputeNodeOperationsDisableSchedulingDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsEnableScheduling200Response | ComputeNodeOperationsEnableSchedulingDefaultResponse): response is ComputeNodeOperationsEnableSchedulingDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsGetRemoteLoginSettings200Response | ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse): response is ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsGetRemoteDesktop200Response | ComputeNodeOperationsGetRemoteDesktopDefaultResponse): response is ComputeNodeOperationsGetRemoteDesktopDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsUploadBatchServiceLogs200Response | ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse): response is ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeOperationsList200Response | ComputeNodeOperationsListDefaultResponse): response is ComputeNodeOperationsListDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeExtensionOperationsGet200Response | ComputeNodeExtensionOperationsGetDefaultResponse): response is ComputeNodeExtensionOperationsGetDefaultResponse; - -// @public (undocumented) -export function isUnexpected(response: ComputeNodeExtensionOperationsList200Response | 
ComputeNodeExtensionOperationsListDefaultResponse): response is ComputeNodeExtensionOperationsListDefaultResponse; - -// @public (undocumented) -export interface JobAdd { - get(options: JobListParameters): StreamableMethod; - post(options: JobAddParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobAdd201Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobAdd201Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobAdd201Headers; - // (undocumented) - status: "201"; -} - -// @public (undocumented) -export interface JobAddBodyParam { - body: BatchJob; -} - -// @public (undocumented) -export interface JobAddDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobAddDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobAddDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobAddHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobAddHeaders; -} - -// @public (undocumented) -export interface JobAddHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobAddParameters = JobAddQueryParam & JobAddHeaderParam & JobAddBodyParam & RequestParameters; - -// @public (undocumented) -export interface JobAddQueryParam { - // (undocumented) - queryParameters?: JobAddQueryParamProperties; -} - -// @public (undocumented) -export interface JobAddQueryParamProperties { - timeOut?: number; -} - -// @public -export interface JobConstraints { - maxTaskRetryCount?: number; - maxWallClockTime?: string; -} - -// @public -export interface JobConstraintsOutput { - maxTaskRetryCount?: number; - maxWallClockTime?: string; -} - -// @public (undocumented) -export interface JobDelete { - delete(options?: JobDeleteParameters): StreamableMethod; - get(options?: JobGetParameters): StreamableMethod; - patch(options: JobPatchParameters): StreamableMethod; - put(options: JobUpdateParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobDelete202Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface JobDelete202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobDelete202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface JobDeleteDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobDeleteDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobDeleteDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobDeleteHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobDeleteHeaders; -} - -// @public (undocumented) -export interface JobDeleteHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobDeleteParameters = JobDeleteQueryParam & JobDeleteHeaderParam & RequestParameters; - -// @public (undocumented) -export interface 
JobDeleteQueryParam { - // (undocumented) - queryParameters?: JobDeleteQueryParamProperties; -} - -// @public (undocumented) -export interface JobDeleteQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobDisable { - post(options: JobDisableParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobDisable202Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobDisable202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobDisable202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface JobDisableBodyParam { - body: BatchJobDisableParameters; -} - -// @public (undocumented) -export interface JobDisableDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobDisableDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobDisableDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobDisableHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobDisableHeaders; -} - -// @public (undocumented) -export interface JobDisableHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobDisableParameters = JobDisableQueryParam & JobDisableHeaderParam & JobDisableBodyParam & RequestParameters; - -// @public (undocumented) -export interface JobDisableQueryParam { - // (undocumented) - queryParameters?: JobDisableQueryParamProperties; -} - -// @public (undocumented) -export interface JobDisableQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobEnable { - post(options?: JobEnableParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobEnable202Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobEnable202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobEnable202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface JobEnableDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobEnableDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobEnableDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobEnableHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobEnableHeaders; -} - -// @public (undocumented) -export interface JobEnableHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobEnableParameters = JobEnableQueryParam & JobEnableHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobEnableQueryParam { - // (undocumented) - queryParameters?: 
JobEnableQueryParamProperties; -} - -// @public (undocumented) -export interface JobEnableQueryParamProperties { - timeOut?: number; -} - -// @public -export interface JobExecutionInformation { - endTime?: Date | string; - poolId?: string; - schedulingError?: JobSchedulingError; - startTime: Date | string; - terminateReason?: string; -} - -// @public -export interface JobExecutionInformationOutput { - endTime?: string; - poolId?: string; - schedulingError?: JobSchedulingErrorOutput; - startTime: string; - terminateReason?: string; -} - -// @public (undocumented) -export interface JobGet200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobGet200Response extends HttpResponse { - // (undocumented) - body: BatchJobOutput; - // (undocumented) - headers: RawHttpHeaders & JobGet200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobGetAllLifetimeStatistics { - get(options?: JobGetAllLifetimeStatisticsParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobGetAllLifetimeStatistics200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobGetAllLifetimeStatistics200Response extends HttpResponse { - // (undocumented) - body: JobStatisticsOutput; - // (undocumented) - headers: RawHttpHeaders & JobGetAllLifetimeStatistics200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobGetAllLifetimeStatisticsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobGetAllLifetimeStatisticsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobGetAllLifetimeStatisticsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobGetAllLifetimeStatisticsHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobGetAllLifetimeStatisticsHeaders; -} - -// @public (undocumented) -export interface JobGetAllLifetimeStatisticsHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobGetAllLifetimeStatisticsParameters = JobGetAllLifetimeStatisticsQueryParam & JobGetAllLifetimeStatisticsHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobGetAllLifetimeStatisticsQueryParam { - // (undocumented) - queryParameters?: JobGetAllLifetimeStatisticsQueryParamProperties; -} - -// @public (undocumented) -export interface JobGetAllLifetimeStatisticsQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobGetHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobGetHeaders; -} - -// @public (undocumented) -export interface JobGetHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - 
"return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobGetParameters = JobGetQueryParam & JobGetHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobGetQueryParam { - // (undocumented) - queryParameters?: JobGetQueryParamProperties; -} - -// @public (undocumented) -export interface JobGetQueryParamProperties { - $expand?: string; - $select?: string; - timeOut?: number; -} - -// @public (undocumented) -export interface JobGetTaskCounts { - get(options?: JobGetTaskCountsParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobGetTaskCounts200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface JobGetTaskCounts200Response extends HttpResponse { - // (undocumented) - body: TaskCountsResultOutput; - // (undocumented) - headers: RawHttpHeaders & JobGetTaskCounts200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobGetTaskCountsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobGetTaskCountsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobGetTaskCountsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobGetTaskCountsHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobGetTaskCountsHeaders; -} - -// @public (undocumented) -export interface JobGetTaskCountsHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobGetTaskCountsParameters = JobGetTaskCountsQueryParam & JobGetTaskCountsHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobGetTaskCountsQueryParam { - // (undocumented) - queryParameters?: JobGetTaskCountsQueryParamProperties; -} - -// @public (undocumented) -export interface JobGetTaskCountsQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobList200Response extends HttpResponse { - // (undocumented) - body: BatchJobListResultOutput; - // (undocumented) - headers: RawHttpHeaders & JobList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobListFromJobSchedule { - get(options: JobListFromJobScheduleParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobListFromJobSchedule200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobListFromJobSchedule200Response extends HttpResponse { - // (undocumented) - body: BatchJobListResultOutput; - // (undocumented) - headers: RawHttpHeaders & JobListFromJobSchedule200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobListFromJobScheduleDefaultHeaders { - "x-ms-error-code"?: string; -} - 
-// @public (undocumented) -export interface JobListFromJobScheduleDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobListFromJobScheduleDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobListFromJobScheduleHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobListFromJobScheduleHeaders; -} - -// @public (undocumented) -export interface JobListFromJobScheduleHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobListFromJobScheduleParameters = JobListFromJobScheduleQueryParam & JobListFromJobScheduleHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobListFromJobScheduleQueryParam { - // (undocumented) - queryParameters: JobListFromJobScheduleQueryParamProperties; -} - -// @public (undocumented) -export interface JobListFromJobScheduleQueryParamProperties { - $expand: string; - $filter: string; - $select: string; - maxresults?: number; - timeOut?: number; -} - -// @public (undocumented) -export interface JobListHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobListHeaders; -} - -// @public (undocumented) -export interface JobListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobListParameters = JobListQueryParam & JobListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobListPreparationAndReleaseTaskStatus { - get(options: JobListPreparationAndReleaseTaskStatusParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobListPreparationAndReleaseTaskStatus200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobListPreparationAndReleaseTaskStatus200Response extends HttpResponse { - // (undocumented) - body: BatchJobListPreparationAndReleaseTaskStatusResultOutput; - // (undocumented) - headers: RawHttpHeaders & JobListPreparationAndReleaseTaskStatus200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobListPreparationAndReleaseTaskStatusDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobListPreparationAndReleaseTaskStatusDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobListPreparationAndReleaseTaskStatusDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobListPreparationAndReleaseTaskStatusHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobListPreparationAndReleaseTaskStatusHeaders; -} - -// @public (undocumented) -export interface JobListPreparationAndReleaseTaskStatusHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobListPreparationAndReleaseTaskStatusParameters = JobListPreparationAndReleaseTaskStatusQueryParam & JobListPreparationAndReleaseTaskStatusHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobListPreparationAndReleaseTaskStatusQueryParam { - // (undocumented) - queryParameters: JobListPreparationAndReleaseTaskStatusQueryParamProperties; -} - -// @public 
(undocumented) -export interface JobListPreparationAndReleaseTaskStatusQueryParamProperties { - $filter: string; - $select: string; - maxresults?: number; - timeOut?: number; -} - -// @public (undocumented) -export interface JobListQueryParam { - // (undocumented) - queryParameters: JobListQueryParamProperties; -} - -// @public (undocumented) -export interface JobListQueryParamProperties { - $expand: string; - $filter: string; - $select: string; - maxresults?: number; - timeOut?: number; -} - -// @public -export interface JobManagerTask { - allowLowPriorityNode?: boolean; - applicationPackageReferences?: Array; - authenticationTokenSettings?: AuthenticationTokenSettings; - commandLine: string; - constraints?: TaskConstraints; - containerSettings?: TaskContainerSettings; - displayName?: string; - environmentSettings?: Array; - id: string; - killJobOnCompletion?: boolean; - outputFiles?: Array; - requiredSlots?: number; - resourceFiles?: Array; - runExclusive?: boolean; - userIdentity?: UserIdentity; -} - -// @public -export interface JobManagerTaskOutput { - allowLowPriorityNode?: boolean; - applicationPackageReferences?: Array; - authenticationTokenSettings?: AuthenticationTokenSettingsOutput; - commandLine: string; - constraints?: TaskConstraintsOutput; - containerSettings?: TaskContainerSettingsOutput; - displayName?: string; - environmentSettings?: Array; - id: string; - killJobOnCompletion?: boolean; - outputFiles?: Array; - requiredSlots?: number; - resourceFiles?: Array; - runExclusive?: boolean; - userIdentity?: UserIdentityOutput; -} - -// @public -export interface JobNetworkConfiguration { - subnetId: string; -} - -// @public -export interface JobNetworkConfigurationOutput { - subnetId: string; -} - -// @public (undocumented) -export interface JobPatch200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobPatch200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobPatch200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobPatchBodyParam { - body: BatchJob; -} - -// @public (undocumented) -export interface JobPatchDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobPatchDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobPatchDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobPatchHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobPatchHeaders; -} - -// @public (undocumented) -export interface JobPatchHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobPatchParameters = JobPatchQueryParam & JobPatchHeaderParam & JobPatchBodyParam & RequestParameters; - -// @public (undocumented) -export interface JobPatchQueryParam { - // (undocumented) - queryParameters?: JobPatchQueryParamProperties; -} - -// @public (undocumented) -export interface JobPatchQueryParamProperties { - timeOut?: number; -} - -// @public -export interface JobPreparationAndReleaseTaskExecutionInformationOutput { - jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformationOutput; - 
jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformationOutput; - nodeId?: string; - nodeUrl?: string; - poolId?: string; -} - -// @public -export interface JobPreparationTask { - commandLine: string; - constraints?: TaskConstraints; - containerSettings?: TaskContainerSettings; - environmentSettings?: Array; - id?: string; - rerunOnNodeRebootAfterSuccess?: boolean; - resourceFiles?: Array; - userIdentity?: UserIdentity; - waitForSuccess?: boolean; -} - -// @public -export interface JobPreparationTaskExecutionInformationOutput { - containerInfo?: TaskContainerExecutionInformationOutput; - endTime?: string; - exitCode?: number; - failureInfo?: TaskFailureInformationOutput; - lastRetryTime?: string; - result?: string; - retryCount: number; - startTime: string; - state: string; - taskRootDirectory?: string; - taskRootDirectoryUrl?: string; -} - -// @public -export interface JobPreparationTaskOutput { - commandLine: string; - constraints?: TaskConstraintsOutput; - containerSettings?: TaskContainerSettingsOutput; - environmentSettings?: Array; - id?: string; - rerunOnNodeRebootAfterSuccess?: boolean; - resourceFiles?: Array; - userIdentity?: UserIdentityOutput; - waitForSuccess?: boolean; -} - -// @public -export interface JobReleaseTask { - commandLine: string; - containerSettings?: TaskContainerSettings; - environmentSettings?: Array; - id?: string; - maxWallClockTime?: string; - resourceFiles?: Array; - retentionTime?: string; - userIdentity?: UserIdentity; -} - -// @public -export interface JobReleaseTaskExecutionInformationOutput { - containerInfo?: TaskContainerExecutionInformationOutput; - endTime?: string; - exitCode?: number; - failureInfo?: TaskFailureInformationOutput; - result?: string; - startTime: string; - state: string; - taskRootDirectory?: string; - taskRootDirectoryUrl?: string; -} - -// @public -export interface JobReleaseTaskOutput { - commandLine: string; - containerSettings?: TaskContainerSettingsOutput; - environmentSettings?: Array; - id?: string; - maxWallClockTime?: string; - resourceFiles?: Array; - retentionTime?: string; - userIdentity?: UserIdentityOutput; -} - -// @public (undocumented) -export interface JobScheduleAdd { - get(options?: JobScheduleListParameters): StreamableMethod; - post(options: JobScheduleAddParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobScheduleAdd201Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobScheduleAdd201Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobScheduleAdd201Headers; - // (undocumented) - status: "201"; -} - -// @public (undocumented) -export interface JobScheduleAddBodyParam { - body: BatchJobSchedule; -} - -// @public (undocumented) -export interface JobScheduleAddDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleAddDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleAddDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleAddHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleAddHeaders; -} - -// @public (undocumented) -export interface JobScheduleAddHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export 
type JobScheduleAddParameters = JobScheduleAddQueryParam & JobScheduleAddHeaderParam & JobScheduleAddBodyParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleAddQueryParam { - // (undocumented) - queryParameters?: JobScheduleAddQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleAddQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobScheduleDelete202Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface JobScheduleDelete202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobScheduleDelete202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface JobScheduleDeleteDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleDeleteDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleDeleteDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleDeleteHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleDeleteHeaders; -} - -// @public (undocumented) -export interface JobScheduleDeleteHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleDeleteParameters = JobScheduleDeleteQueryParam & JobScheduleDeleteHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleDeleteQueryParam { - // (undocumented) - queryParameters?: JobScheduleDeleteQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleDeleteQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobScheduleDisable { - post(options?: JobScheduleDisableParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobScheduleDisable204Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobScheduleDisable204Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobScheduleDisable204Headers; - // (undocumented) - status: "204"; -} - -// @public (undocumented) -export interface JobScheduleDisableDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleDisableDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleDisableDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleDisableHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleDisableHeaders; -} - -// @public (undocumented) -export interface JobScheduleDisableHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleDisableParameters = JobScheduleDisableQueryParam & JobScheduleDisableHeaderParam & RequestParameters; - -// @public (undocumented) 
-export interface JobScheduleDisableQueryParam { - // (undocumented) - queryParameters?: JobScheduleDisableQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleDisableQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobScheduleEnable { - post(options?: JobScheduleEnableParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobScheduleEnable204Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobScheduleEnable204Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobScheduleEnable204Headers; - // (undocumented) - status: "204"; -} - -// @public (undocumented) -export interface JobScheduleEnableDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleEnableDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleEnableDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleEnableHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleEnableHeaders; -} - -// @public (undocumented) -export interface JobScheduleEnableHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleEnableParameters = JobScheduleEnableQueryParam & JobScheduleEnableHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleEnableQueryParam { - // (undocumented) - queryParameters?: JobScheduleEnableQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleEnableQueryParamProperties { - timeOut?: number; -} - -// @public -export interface JobScheduleExecutionInformation { - endTime?: Date | string; - nextRunTime?: Date | string; - recentJob?: RecentJob; -} - -// @public -export interface JobScheduleExecutionInformationOutput { - endTime?: string; - nextRunTime?: string; - recentJob?: RecentJobOutput; -} - -// @public (undocumented) -export interface JobScheduleExists { - delete(options?: JobScheduleDeleteParameters): StreamableMethod; - get(options?: JobScheduleGetParameters): StreamableMethod; - head(options?: JobScheduleExistsParameters): StreamableMethod; - patch(options: JobSchedulePatchParameters): StreamableMethod; - put(options: JobScheduleUpdateParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobScheduleExists200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobScheduleExists200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobScheduleExists200Headers; - // (undocumented) - status: "200"; -} - -// @public -export interface JobScheduleExists204Response extends HttpResponse { - // (undocumented) - status: "204"; -} - -// @public (undocumented) -export interface JobScheduleExistsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleExistsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & 
JobScheduleExistsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleExistsHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleExistsHeaders; -} - -// @public (undocumented) -export interface JobScheduleExistsHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleExistsParameters = JobScheduleExistsQueryParam & JobScheduleExistsHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleExistsQueryParam { - // (undocumented) - queryParameters?: JobScheduleExistsQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleExistsQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobScheduleGet200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobScheduleGet200Response extends HttpResponse { - // (undocumented) - body: BatchJobScheduleOutput; - // (undocumented) - headers: RawHttpHeaders & JobScheduleGet200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobScheduleGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleGetHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleGetHeaders; -} - -// @public (undocumented) -export interface JobScheduleGetHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleGetParameters = JobScheduleGetQueryParam & JobScheduleGetHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleGetQueryParam { - // (undocumented) - queryParameters?: JobScheduleGetQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleGetQueryParamProperties { - $expand?: string; - $select?: string; - timeOut?: number; -} - -// @public (undocumented) -export interface JobScheduleList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface JobScheduleList200Response extends HttpResponse { - // (undocumented) - body: BatchJobScheduleListResultOutput; - // (undocumented) - headers: RawHttpHeaders & JobScheduleList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobScheduleListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleListHeaderParam { - // (undocumented) - headers?: 
RawHttpHeadersInput & JobScheduleListHeaders; -} - -// @public (undocumented) -export interface JobScheduleListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleListParameters = JobScheduleListQueryParam & JobScheduleListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleListQueryParam { - // (undocumented) - queryParameters?: JobScheduleListQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleListQueryParamProperties { - $expand?: string; - $filter?: string; - $select?: string; - maxresults?: number; - timeOut?: number; -} - -// @public (undocumented) -export interface JobSchedulePatch200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobSchedulePatch200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobSchedulePatch200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobSchedulePatchBodyParam { - body: BatchJobSchedule; -} - -// @public (undocumented) -export interface JobSchedulePatchDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobSchedulePatchDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobSchedulePatchDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobSchedulePatchHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobSchedulePatchHeaders; -} - -// @public (undocumented) -export interface JobSchedulePatchHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobSchedulePatchParameters = JobSchedulePatchQueryParam & JobSchedulePatchHeaderParam & JobSchedulePatchBodyParam & RequestParameters; - -// @public (undocumented) -export interface JobSchedulePatchQueryParam { - // (undocumented) - queryParameters?: JobSchedulePatchQueryParamProperties; -} - -// @public (undocumented) -export interface JobSchedulePatchQueryParamProperties { - timeOut?: number; -} - -// @public -export interface JobScheduleStatistics { - kernelCPUTime: string; - lastUpdateTime: Date | string; - numFailedTasks: number; - numSucceededTasks: number; - numTaskRetries: number; - readIOGiB: number; - readIOps: number; - startTime: Date | string; - url: string; - userCPUTime: string; - waitTime: string; - wallClockTime: string; - writeIOGiB: number; - writeIOps: number; -} - -// @public -export interface JobScheduleStatisticsOutput { - kernelCPUTime: string; - lastUpdateTime: string; - numFailedTasks: number; - numSucceededTasks: number; - numTaskRetries: number; - readIOGiB: number; - readIOps: number; - startTime: string; - url: string; - userCPUTime: string; - waitTime: string; - wallClockTime: string; - writeIOGiB: number; - writeIOps: number; -} - -// @public (undocumented) -export interface JobScheduleTerminate { - post(options?: JobScheduleTerminateParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobScheduleTerminate202Headers { - "client-request-id"?: string; - "last-modified"?: string; - 
"request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobScheduleTerminate202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobScheduleTerminate202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface JobScheduleTerminateDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleTerminateDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleTerminateDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleTerminateHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleTerminateHeaders; -} - -// @public (undocumented) -export interface JobScheduleTerminateHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleTerminateParameters = JobScheduleTerminateQueryParam & JobScheduleTerminateHeaderParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleTerminateQueryParam { - // (undocumented) - queryParameters?: JobScheduleTerminateQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleTerminateQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobScheduleUpdate200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobScheduleUpdate200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobScheduleUpdate200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobScheduleUpdateBodyParam { - body: BatchJobSchedule; -} - -// @public (undocumented) -export interface JobScheduleUpdateDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobScheduleUpdateDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobScheduleUpdateDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobScheduleUpdateHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobScheduleUpdateHeaders; -} - -// @public (undocumented) -export interface JobScheduleUpdateHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobScheduleUpdateParameters = JobScheduleUpdateQueryParam & JobScheduleUpdateHeaderParam & JobScheduleUpdateBodyParam & RequestParameters; - -// @public (undocumented) -export interface JobScheduleUpdateQueryParam { - // (undocumented) - queryParameters?: JobScheduleUpdateQueryParamProperties; -} - -// @public (undocumented) -export interface JobScheduleUpdateQueryParamProperties { - timeOut?: number; -} - -// @public -export interface JobSchedulingError { - category: string; - code?: string; - details?: Array; - message?: string; -} - -// @public -export interface JobSchedulingErrorOutput { - 
category: string; - code?: string; - details?: Array; - message?: string; -} - -// @public -export interface JobSpecification { - allowTaskPreemption?: boolean; - commonEnvironmentSettings?: Array; - constraints?: JobConstraints; - displayName?: string; - jobManagerTask?: JobManagerTask; - jobPreparationTask?: JobPreparationTask; - jobReleaseTask?: JobReleaseTask; - maxParallelTasks?: number; - metadata?: Array; - networkConfiguration?: JobNetworkConfiguration; - onAllTasksComplete?: string; - onTaskFailure?: string; - poolInfo: PoolInformation; - priority?: number; - usesTaskDependencies?: boolean; -} - -// @public -export interface JobSpecificationOutput { - allowTaskPreemption?: boolean; - commonEnvironmentSettings?: Array; - constraints?: JobConstraintsOutput; - displayName?: string; - jobManagerTask?: JobManagerTaskOutput; - jobPreparationTask?: JobPreparationTaskOutput; - jobReleaseTask?: JobReleaseTaskOutput; - maxParallelTasks?: number; - metadata?: Array; - networkConfiguration?: JobNetworkConfigurationOutput; - onAllTasksComplete?: string; - onTaskFailure?: string; - poolInfo: PoolInformationOutput; - priority?: number; - usesTaskDependencies?: boolean; -} - -// @public -export interface JobStatistics { - kernelCPUTime: string; - lastUpdateTime: Date | string; - numFailedTasks: number; - numSucceededTasks: number; - numTaskRetries: number; - readIOGiB: number; - readIOps: number; - startTime: Date | string; - userCPUTime: string; - waitTime: string; - wallClockTime: string; - writeIOGiB: number; - writeIOps: number; -} - -// @public -export interface JobStatisticsOutput { - kernelCPUTime: string; - lastUpdateTime: string; - numFailedTasks: number; - numSucceededTasks: number; - numTaskRetries: number; - readIOGiB: number; - readIOps: number; - startTime: string; - readonly url: string; - userCPUTime: string; - waitTime: string; - wallClockTime: string; - writeIOGiB: number; - writeIOps: number; -} - -// @public (undocumented) -export interface JobTerminate { - post(options: JobTerminateParameters): StreamableMethod; -} - -// @public (undocumented) -export interface JobTerminate202Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobTerminate202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobTerminate202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface JobTerminateBodyParam { - body: BatchJobTerminateParameters; -} - -// @public (undocumented) -export interface JobTerminateDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobTerminateDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobTerminateDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobTerminateHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobTerminateHeaders; -} - -// @public (undocumented) -export interface JobTerminateHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobTerminateParameters = JobTerminateQueryParam & JobTerminateHeaderParam & JobTerminateBodyParam & RequestParameters; - -// @public 
(undocumented) -export interface JobTerminateQueryParam { - // (undocumented) - queryParameters?: JobTerminateQueryParamProperties; -} - -// @public (undocumented) -export interface JobTerminateQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface JobUpdate200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface JobUpdate200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & JobUpdate200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface JobUpdateBodyParam { - body: BatchJob; -} - -// @public (undocumented) -export interface JobUpdateDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface JobUpdateDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & JobUpdateDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface JobUpdateHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & JobUpdateHeaders; -} - -// @public (undocumented) -export interface JobUpdateHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type JobUpdateParameters = JobUpdateQueryParam & JobUpdateHeaderParam & JobUpdateBodyParam & RequestParameters; - -// @public (undocumented) -export interface JobUpdateQueryParam { - // (undocumented) - queryParameters?: JobUpdateQueryParamProperties; -} - -// @public (undocumented) -export interface JobUpdateQueryParamProperties { - timeOut?: number; -} - -// @public -export interface LinuxUserConfiguration { - gid?: number; - sshPrivateKey?: string; - uid?: number; -} - -// @public -export interface LinuxUserConfigurationOutput { - gid?: number; - sshPrivateKey?: string; - uid?: number; -} - -// @public -export interface MetadataItem { - name: string; - value: string; -} - -// @public -export interface MetadataItemOutput { - name: string; - value: string; -} - -// @public -export interface MountConfiguration { - azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration; - azureFileShareConfiguration?: AzureFileShareConfiguration; - cifsMountConfiguration?: CifsMountConfiguration; - nfsMountConfiguration?: NFSMountConfiguration; -} - -// @public -export interface MountConfigurationOutput { - azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfigurationOutput; - azureFileShareConfiguration?: AzureFileShareConfigurationOutput; - cifsMountConfiguration?: CifsMountConfigurationOutput; - nfsMountConfiguration?: NFSMountConfigurationOutput; -} - -// @public -export interface MultiInstanceSettings { - commonResourceFiles?: Array; - coordinationCommandLine: string; - numberOfInstances?: number; -} - -// @public -export interface MultiInstanceSettingsOutput { - commonResourceFiles?: Array; - coordinationCommandLine: string; - numberOfInstances?: number; -} - -// @public -export interface NameValuePair { - name?: string; - value?: string; -} - -// @public -export interface NameValuePairOutput { - name?: string; - value?: string; -} - -// @public -export interface NetworkConfiguration { - dynamicVNetAssignmentScope?: string; - endpointConfiguration?: PoolEndpointConfiguration; - 
publicIPAddressConfiguration?: PublicIPAddressConfiguration; - subnetId?: string; -} - -// @public -export interface NetworkConfigurationOutput { - dynamicVNetAssignmentScope?: string; - endpointConfiguration?: PoolEndpointConfigurationOutput; - publicIPAddressConfiguration?: PublicIPAddressConfigurationOutput; - subnetId?: string; -} - -// @public -export interface NetworkSecurityGroupRule { - access: string; - priority: number; - sourceAddressPrefix: string; - sourcePortRanges?: string[]; -} - -// @public -export interface NetworkSecurityGroupRuleOutput { - access: string; - priority: number; - sourceAddressPrefix: string; - sourcePortRanges?: string[]; -} - -// @public -export interface NFSMountConfiguration { - mountOptions?: string; - relativeMountPath: string; - source: string; -} - -// @public -export interface NFSMountConfigurationOutput { - mountOptions?: string; - relativeMountPath: string; - source: string; -} - -// @public -export interface NodeAgentInformationOutput { - lastUpdateTime: string; - version: string; -} - -// @public -export interface NodeCountsOutput { - creating: number; - idle: number; - leavingPool: number; - offline: number; - preempted: number; - rebooting: number; - reimaging: number; - running: number; - starting: number; - startTaskFailed: number; - total: number; - unknown: number; - unusable: number; - waitingForStartTask: number; -} - -// @public -export interface NodeDisableSchedulingParameters { - nodeDisableSchedulingOption?: string; -} - -// @public -export interface NodeFileListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface NodeFileOutput { - isDirectory?: boolean; - name?: string; - properties?: FilePropertiesOutput; - url?: string; -} - -// @public -export interface NodePlacementConfiguration { - policy?: string; -} - -// @public -export interface NodePlacementConfigurationOutput { - policy?: string; -} - -// @public -export interface NodeRebootParameters { - nodeRebootOption?: string; -} - -// @public -export interface NodeReimageParameters { - nodeReimageOption?: string; -} - -// @public -export interface NodeRemoveParameters { - nodeDeallocationOption?: string; - nodeList: string[]; - resizeTimeout?: string; -} - -// @public -export interface NodeUpdateUserParameters { - expiryTime?: Date | string; - password?: string; - sshPublicKey?: string; -} - -// @public -export interface NodeVMExtensionListOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface NodeVMExtensionOutput { - instanceView?: VMExtensionInstanceViewOutput; - provisioningState?: string; - vmExtension?: VMExtensionOutput; -} - -// @public (undocumented) -interface Object_2 { -} -export { Object_2 as Object } - -// @public (undocumented) -export interface ObjectOutput { -} - -// @public -export interface OSDisk { - ephemeralOSDiskSettings?: DiffDiskSettings; -} - -// @public -export interface OSDiskOutput { - ephemeralOSDiskSettings?: DiffDiskSettingsOutput; -} - -// @public -export interface OutputFile { - destination: OutputFileDestination; - filePattern: string; - uploadOptions: OutputFileUploadOptions; -} - -// @public -export interface OutputFileBlobContainerDestination { - containerUrl: string; - identityReference?: ComputeNodeIdentityReference; - path?: string; - uploadHeaders?: Array; -} - -// @public -export interface OutputFileBlobContainerDestinationOutput { - containerUrl: string; - identityReference?: ComputeNodeIdentityReferenceOutput; - path?: string; - uploadHeaders?: Array; -} - -// 
@public -export interface OutputFileDestination { - container?: OutputFileBlobContainerDestination; -} - -// @public -export interface OutputFileDestinationOutput { - container?: OutputFileBlobContainerDestinationOutput; -} - -// @public -export interface OutputFileOutput { - destination: OutputFileDestinationOutput; - filePattern: string; - uploadOptions: OutputFileUploadOptionsOutput; -} - -// @public -export interface OutputFileUploadOptions { - uploadCondition: string; -} - -// @public -export interface OutputFileUploadOptionsOutput { - uploadCondition: string; -} - -// @public -export function paginate(client: Client, initialResponse: TResponse, options?: PagingOptions): PagedAsyncIterableIterator>; - -// @public -export type PaginateReturn = TResult extends { - body: { - value?: infer TPage; - }; -} ? GetArrayType : Array; - -// @public -export interface PagingOptions { - customGetPage?: GetPage[]>; -} - -// @public (undocumented) -export interface PoolAdd { - get(options?: PoolListParameters): StreamableMethod; - post(options: PoolAddParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolAdd201Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolAdd201Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolAdd201Headers; - // (undocumented) - status: "201"; -} - -// @public (undocumented) -export interface PoolAddBodyParam { - body: BatchPool; -} - -// @public (undocumented) -export interface PoolAddDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolAddDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolAddDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolAddHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolAddHeaders; -} - -// @public (undocumented) -export interface PoolAddHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolAddParameters = PoolAddQueryParam & PoolAddHeaderParam & PoolAddBodyParam & RequestParameters; - -// @public (undocumented) -export interface PoolAddQueryParam { - // (undocumented) - queryParameters?: PoolAddQueryParamProperties; -} - -// @public (undocumented) -export interface PoolAddQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolDelete { - delete(options?: PoolDeleteParameters): StreamableMethod; - get(options: PoolGetParameters): StreamableMethod; - head(options?: PoolExistsParameters): StreamableMethod; - patch(options: PoolPatchParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolDelete202Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface PoolDelete202Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolDelete202Headers; - // (undocumented) - status: "202"; -} - -// @public (undocumented) -export interface PoolDeleteDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolDeleteDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolDeleteDefaultHeaders; - // (undocumented) - status: 
string; -} - -// @public (undocumented) -export interface PoolDeleteHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolDeleteHeaders; -} - -// @public (undocumented) -export interface PoolDeleteHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolDeleteParameters = PoolDeleteQueryParam & PoolDeleteHeaderParam & RequestParameters; - -// @public (undocumented) -export interface PoolDeleteQueryParam { - // (undocumented) - queryParameters?: PoolDeleteQueryParamProperties; -} - -// @public (undocumented) -export interface PoolDeleteQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolDisableAutoScale { - post(options?: PoolDisableAutoScaleParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolDisableAutoScale200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolDisableAutoScale200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolDisableAutoScale200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolDisableAutoScaleDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolDisableAutoScaleDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolDisableAutoScaleDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolDisableAutoScaleHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolDisableAutoScaleHeaders; -} - -// @public (undocumented) -export interface PoolDisableAutoScaleHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolDisableAutoScaleParameters = PoolDisableAutoScaleQueryParam & PoolDisableAutoScaleHeaderParam & RequestParameters; - -// @public (undocumented) -export interface PoolDisableAutoScaleQueryParam { - // (undocumented) - queryParameters?: PoolDisableAutoScaleQueryParamProperties; -} - -// @public (undocumented) -export interface PoolDisableAutoScaleQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolEnableAutoScale { - post(options: PoolEnableAutoScaleParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolEnableAutoScale200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolEnableAutoScale200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolEnableAutoScale200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolEnableAutoScaleBodyParam { - body: BatchPoolEnableAutoScaleParameters; -} - -// @public (undocumented) -export interface PoolEnableAutoScaleDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolEnableAutoScaleDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & 
PoolEnableAutoScaleDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolEnableAutoScaleHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolEnableAutoScaleHeaders; -} - -// @public (undocumented) -export interface PoolEnableAutoScaleHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolEnableAutoScaleParameters = PoolEnableAutoScaleQueryParam & PoolEnableAutoScaleHeaderParam & PoolEnableAutoScaleBodyParam & RequestParameters; - -// @public (undocumented) -export interface PoolEnableAutoScaleQueryParam { - // (undocumented) - queryParameters?: PoolEnableAutoScaleQueryParamProperties; -} - -// @public (undocumented) -export interface PoolEnableAutoScaleQueryParamProperties { - timeOut?: number; -} - -// @public -export interface PoolEndpointConfiguration { - inboundNATPools: Array; -} - -// @public -export interface PoolEndpointConfigurationOutput { - inboundNATPools: Array; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScale { - post(options: PoolEvaluateAutoScaleParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScale200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolEvaluateAutoScale200Response extends HttpResponse { - // (undocumented) - body: AutoScaleRunOutput; - // (undocumented) - headers: RawHttpHeaders & PoolEvaluateAutoScale200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScaleBodyParam { - body: BatchPoolEvaluateAutoScaleParameters; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScaleDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScaleDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolEvaluateAutoScaleDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScaleHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolEvaluateAutoScaleHeaders; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScaleHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolEvaluateAutoScaleParameters = PoolEvaluateAutoScaleQueryParam & PoolEvaluateAutoScaleHeaderParam & PoolEvaluateAutoScaleBodyParam & RequestParameters; - -// @public (undocumented) -export interface PoolEvaluateAutoScaleQueryParam { - // (undocumented) - queryParameters?: PoolEvaluateAutoScaleQueryParamProperties; -} - -// @public (undocumented) -export interface PoolEvaluateAutoScaleQueryParamProperties { - timeOut?: number; -} - -// @public -export interface PoolExists404Response extends HttpResponse { - // (undocumented) - status: "404"; -} - -// @public (undocumented) -export interface PoolExistsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolExistsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & 
PoolExistsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolExistsHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolExistsHeaders; -} - -// @public (undocumented) -export interface PoolExistsHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolExistsParameters = PoolExistsQueryParam & PoolExistsHeaderParam & RequestParameters; - -// @public (undocumented) -export interface PoolExistsQueryParam { - // (undocumented) - queryParameters?: PoolExistsQueryParamProperties; -} - -// @public (undocumented) -export interface PoolExistsQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolGet200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface PoolGet200Response extends HttpResponse { - // (undocumented) - body: BatchPoolOutput; - // (undocumented) - headers: RawHttpHeaders & PoolGet200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolGetAllLifetimeStatistics { - get(options?: PoolGetAllLifetimeStatisticsParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolGetAllLifetimeStatistics200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface PoolGetAllLifetimeStatistics200Response extends HttpResponse { - // (undocumented) - body: PoolStatisticsOutput; - // (undocumented) - headers: RawHttpHeaders & PoolGetAllLifetimeStatistics200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolGetAllLifetimeStatisticsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolGetAllLifetimeStatisticsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolGetAllLifetimeStatisticsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolGetAllLifetimeStatisticsHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolGetAllLifetimeStatisticsHeaders; -} - -// @public (undocumented) -export interface PoolGetAllLifetimeStatisticsHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolGetAllLifetimeStatisticsParameters = PoolGetAllLifetimeStatisticsQueryParam & PoolGetAllLifetimeStatisticsHeaderParam & RequestParameters; - -// @public (undocumented) -export interface PoolGetAllLifetimeStatisticsQueryParam { - // (undocumented) - queryParameters?: PoolGetAllLifetimeStatisticsQueryParamProperties; -} - -// @public (undocumented) -export interface PoolGetAllLifetimeStatisticsQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export 
interface PoolGetHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolGetHeaders; -} - -// @public (undocumented) -export interface PoolGetHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolGetParameters = PoolGetQueryParam & PoolGetHeaderParam & RequestParameters; - -// @public (undocumented) -export interface PoolGetQueryParam { - // (undocumented) - queryParameters: PoolGetQueryParamProperties; -} - -// @public (undocumented) -export interface PoolGetQueryParamProperties { - $expand: string; - $select: string; - timeOut?: number; -} - -// @public -export interface PoolInformation { - autoPoolSpecification?: AutoPoolSpecification; - poolId?: string; -} - -// @public -export interface PoolInformationOutput { - autoPoolSpecification?: AutoPoolSpecificationOutput; - poolId?: string; -} - -// @public (undocumented) -export interface PoolList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface PoolList200Response extends HttpResponse { - // (undocumented) - body: BatchPoolListResultOutput; - // (undocumented) - headers: RawHttpHeaders & PoolList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolListHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolListHeaders; -} - -// @public (undocumented) -export interface PoolListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolListParameters = PoolListQueryParam & PoolListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface PoolListQueryParam { - // (undocumented) - queryParameters?: PoolListQueryParamProperties; -} - -// @public (undocumented) -export interface PoolListQueryParamProperties { - $expand?: string; - $filter?: string; - $select?: string; - maxresults?: number; - timeOut?: number; -} - -// @public (undocumented) -export interface PoolListUsageMetrics { - get(options?: PoolListUsageMetricsParameters): StreamableMethod; -} - -// @public -export interface PoolListUsageMetrics200Response extends HttpResponse { - // (undocumented) - body: PoolUsageMetricsListOutput; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolListUsageMetricsDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolListUsageMetricsDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolListUsageMetricsDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export type PoolListUsageMetricsParameters = RequestParameters; - -// @public -export interface PoolNodeCountsListResultOutput { - "odata.nextLink"?: string; - value?: Array; -} - -// @public -export interface PoolNodeCountsOutput { - dedicated?: 
NodeCountsOutput; - lowPriority?: NodeCountsOutput; - readonly poolId: string; -} - -// @public (undocumented) -export interface PoolPatch200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolPatch200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolPatch200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolPatchBodyParam { - body: BatchPool; -} - -// @public (undocumented) -export interface PoolPatchDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolPatchDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolPatchDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolPatchHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolPatchHeaders; -} - -// @public (undocumented) -export interface PoolPatchHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolPatchParameters = PoolPatchQueryParam & PoolPatchHeaderParam & PoolPatchBodyParam & RequestParameters; - -// @public (undocumented) -export interface PoolPatchQueryParam { - // (undocumented) - queryParameters?: PoolPatchQueryParamProperties; -} - -// @public (undocumented) -export interface PoolPatchQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolRemoveNodes { - post(options: PoolRemoveNodesParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolRemoveNodes200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolRemoveNodes200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolRemoveNodes200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolRemoveNodesBodyParam { - body: NodeRemoveParameters; -} - -// @public (undocumented) -export interface PoolRemoveNodesDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolRemoveNodesDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolRemoveNodesDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolRemoveNodesHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolRemoveNodesHeaders; -} - -// @public (undocumented) -export interface PoolRemoveNodesHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolRemoveNodesParameters = PoolRemoveNodesQueryParam & PoolRemoveNodesHeaderParam & PoolRemoveNodesBodyParam & RequestParameters; - -// @public (undocumented) -export interface PoolRemoveNodesQueryParam { - // (undocumented) - queryParameters?: PoolRemoveNodesQueryParamProperties; -} - -// @public (undocumented) 
-export interface PoolRemoveNodesQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolResize { - post(options: PoolResizeParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolResize200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolResize200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolResize200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolResizeBodyParam { - body: BatchPoolResizeParameters; -} - -// @public (undocumented) -export interface PoolResizeDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolResizeDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolResizeDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolResizeHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolResizeHeaders; -} - -// @public (undocumented) -export interface PoolResizeHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolResizeParameters = PoolResizeQueryParam & PoolResizeHeaderParam & PoolResizeBodyParam & RequestParameters; - -// @public (undocumented) -export interface PoolResizeQueryParam { - // (undocumented) - queryParameters?: PoolResizeQueryParamProperties; -} - -// @public (undocumented) -export interface PoolResizeQueryParamProperties { - timeOut?: number; -} - -// @public -export interface PoolSpecification { - applicationLicenses?: string[]; - applicationPackageReferences?: Array; - autoScaleEvaluationInterval?: string; - autoScaleFormula?: string; - certificateReferences?: Array; - cloudServiceConfiguration?: CloudServiceConfiguration; - displayName?: string; - enableAutoScale?: boolean; - enableInterNodeCommunication?: boolean; - metadata?: Array; - mountConfiguration?: Array; - networkConfiguration?: NetworkConfiguration; - resizeTimeout?: string; - startTask?: StartTask; - targetDedicatedNodes?: number; - targetLowPriorityNodes?: number; - targetNodeCommunicationMode?: string; - taskSchedulingPolicy?: TaskSchedulingPolicy; - taskSlotsPerNode?: number; - userAccounts?: Array; - virtualMachineConfiguration?: VirtualMachineConfiguration; - vmSize: string; -} - -// @public -export interface PoolSpecificationOutput { - applicationLicenses?: string[]; - applicationPackageReferences?: Array; - autoScaleEvaluationInterval?: string; - autoScaleFormula?: string; - certificateReferences?: Array; - cloudServiceConfiguration?: CloudServiceConfigurationOutput; - displayName?: string; - enableAutoScale?: boolean; - enableInterNodeCommunication?: boolean; - metadata?: Array; - mountConfiguration?: Array; - networkConfiguration?: NetworkConfigurationOutput; - resizeTimeout?: string; - startTask?: StartTaskOutput; - targetDedicatedNodes?: number; - targetLowPriorityNodes?: number; - targetNodeCommunicationMode?: string; - taskSchedulingPolicy?: TaskSchedulingPolicyOutput; - taskSlotsPerNode?: number; - userAccounts?: Array; - virtualMachineConfiguration?: VirtualMachineConfigurationOutput; - vmSize: string; 
-} - -// @public -export interface PoolStatistics { - lastUpdateTime: Date | string; - resourceStats?: ResourceStatistics; - startTime: Date | string; - usageStats?: UsageStatistics; -} - -// @public -export interface PoolStatisticsOutput { - lastUpdateTime: string; - resourceStats?: ResourceStatisticsOutput; - startTime: string; - readonly url: string; - usageStats?: UsageStatisticsOutput; -} - -// @public (undocumented) -export interface PoolStopResize { - post(options?: PoolStopResizeParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolStopResize200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolStopResize200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolStopResize200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolStopResizeDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolStopResizeDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolStopResizeDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolStopResizeHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolStopResizeHeaders; -} - -// @public (undocumented) -export interface PoolStopResizeHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type PoolStopResizeParameters = PoolStopResizeQueryParam & PoolStopResizeHeaderParam & RequestParameters; - -// @public (undocumented) -export interface PoolStopResizeQueryParam { - // (undocumented) - queryParameters?: PoolStopResizeQueryParamProperties; -} - -// @public (undocumented) -export interface PoolStopResizeQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface PoolUpdateProperties { - post(options: PoolUpdatePropertiesParameters): StreamableMethod; -} - -// @public (undocumented) -export interface PoolUpdateProperties200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface PoolUpdateProperties200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & PoolUpdateProperties200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface PoolUpdatePropertiesBodyParam { - body: BatchPool; -} - -// @public (undocumented) -export interface PoolUpdatePropertiesDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface PoolUpdatePropertiesDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & PoolUpdatePropertiesDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface PoolUpdatePropertiesHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & PoolUpdatePropertiesHeaders; -} - -// @public (undocumented) -export interface PoolUpdatePropertiesHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public 
(undocumented) -export type PoolUpdatePropertiesParameters = PoolUpdatePropertiesQueryParam & PoolUpdatePropertiesHeaderParam & PoolUpdatePropertiesBodyParam & RequestParameters; - -// @public (undocumented) -export interface PoolUpdatePropertiesQueryParam { - // (undocumented) - queryParameters?: PoolUpdatePropertiesQueryParamProperties; -} - -// @public (undocumented) -export interface PoolUpdatePropertiesQueryParamProperties { - timeOut?: number; -} - -// @public -export type PoolUsageMetricsListOutput = Paged; - -// @public -export interface PoolUsageMetricsOutput { - endTime: string; - readonly poolId: string; - startTime: string; - totalCoreHours: number; - vmSize: string; -} - -// @public -export interface PublicIPAddressConfiguration { - ipAddressIds?: string[]; - provision?: string; -} - -// @public -export interface PublicIPAddressConfigurationOutput { - ipAddressIds?: string[]; - provision?: string; -} - -// @public -export interface RecentJob { - id?: string; - url?: string; -} - -// @public -export interface RecentJobOutput { - id?: string; - url?: string; -} - -// @public -export interface ResizeError { - code?: string; - message?: string; - values?: Array; -} - -// @public -export interface ResizeErrorOutput { - code?: string; - message?: string; - values?: Array; -} - -// @public -export interface ResourceFile { - autoStorageContainerName?: string; - blobPrefix?: string; - fileMode?: string; - filePath?: string; - httpUrl?: string; - identityReference?: ComputeNodeIdentityReference; - storageContainerUrl?: string; -} - -// @public -export interface ResourceFileOutput { - autoStorageContainerName?: string; - blobPrefix?: string; - fileMode?: string; - filePath?: string; - httpUrl?: string; - identityReference?: ComputeNodeIdentityReferenceOutput; - storageContainerUrl?: string; -} - -// @public -export interface ResourceStatistics { - avgCPUPercentage: number; - avgDiskGiB: number; - avgMemoryGiB: number; - diskReadGiB: number; - diskReadIOps: number; - diskWriteGiB: number; - diskWriteIOps: number; - lastUpdateTime: Date | string; - networkReadGiB: number; - networkWriteGiB: number; - peakDiskGiB: number; - peakMemoryGiB: number; - startTime: Date | string; -} - -// @public -export interface ResourceStatisticsOutput { - avgCPUPercentage: number; - avgDiskGiB: number; - avgMemoryGiB: number; - diskReadGiB: number; - diskReadIOps: number; - diskWriteGiB: number; - diskWriteIOps: number; - lastUpdateTime: string; - networkReadGiB: number; - networkWriteGiB: number; - peakDiskGiB: number; - peakMemoryGiB: number; - startTime: string; -} - -// @public (undocumented) -export interface Routes { - (path: "/applications"): ApplicationOperationsList; - (path: "/applications/{applicationId}", applicationId: string): ApplicationOperationsGet; - (path: "/poolusagemetrics"): PoolListUsageMetrics; - (path: "/lifetimepoolstats"): PoolGetAllLifetimeStatistics; - (path: "/pools"): PoolAdd; - (path: "/pools/{poolId}", poolId: string): PoolDelete; - (path: "/pools/{poolId}/disableautoscale", poolId: string): PoolDisableAutoScale; - (path: "/pools/{poolId}/enableautoscale", poolId: string): PoolEnableAutoScale; - (path: "/pools/{poolId}/evaluateautoscale", poolId: string): PoolEvaluateAutoScale; - (path: "/pools/{poolId}/resize", poolId: string): PoolResize; - (path: "/pools/{poolId}/stopresize", poolId: string): PoolStopResize; - (path: "/pools/{poolId}/updateproperties", poolId: string): PoolUpdateProperties; - (path: "/pools/{poolId}/removenodes", poolId: string): PoolRemoveNodes; - (path: 
"/supportedimages"): AccountListSupportedImages; - (path: "/nodecounts"): AccountListPoolNodeCounts; - (path: "/lifetimejobstats"): JobGetAllLifetimeStatistics; - (path: "/jobs/{jobId}", jobId: string): JobDelete; - (path: "/jobs/{jobId}/disable", jobId: string): JobDisable; - (path: "/jobs/{jobId}/enable", jobId: string): JobEnable; - (path: "/jobs/{jobId}/terminate", jobId: string): JobTerminate; - (path: "/jobs"): JobAdd; - (path: "/jobschedules/{jobScheduleId}/jobs", jobScheduleId: string): JobListFromJobSchedule; - (path: "/jobs/{jobId}/jobpreparationandreleasetaskstatus", jobId: string): JobListPreparationAndReleaseTaskStatus; - (path: "/jobs/{jobId}/taskcounts", jobId: string): JobGetTaskCounts; - (path: "/certificates"): CertificateOperationsAdd; - (path: "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete", thumbprintAlgorithm: string, thumbprint: string): CertificateOperationsCancelDeletion; - (path: "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", thumbprintAlgorithm: string, thumbprint: string): CertificateOperationsDelete; - (path: "/jobs/{jobId}/tasks/{taskId}/files/{filePath}", jobId: string, taskId: string, filePath: string): FileDeleteFromTask; - (path: "/pools/{poolId}/nodes/{nodeId}/files/{filePath}", poolId: string, nodeId: string, filePath: string): FileDeleteFromComputeNode; - (path: "/jobs/{jobId}/tasks/{taskId}/files", jobId: string, taskId: string): FileListFromTask; - (path: "/pools/{poolId}/nodes/{nodeId}/files", poolId: string, nodeId: string): FileListFromComputeNode; - (path: "/jobschedules/{jobScheduleId}", jobScheduleId: string): JobScheduleExists; - (path: "/jobschedules/{jobScheduleId}/disable", jobScheduleId: string): JobScheduleDisable; - (path: "/jobschedules/{jobScheduleId}/enable", jobScheduleId: string): JobScheduleEnable; - (path: "/jobschedules/{jobScheduleId}/terminate", jobScheduleId: string): JobScheduleTerminate; - (path: "/jobschedules"): JobScheduleAdd; - (path: "/jobs/{jobId}/tasks", jobId: string): TaskAdd; - (path: "/jobs/{jobId}/addtaskcollection", jobId: string): TaskAddCollection; - (path: "/jobs/{jobId}/tasks/{taskId}", jobId: string, taskId: string): TaskDelete; - (path: "/jobs/{jobId}/tasks/{taskId}/subtasksinfo", jobId: string, taskId: string): TaskListSubtasks; - (path: "/jobs/{jobId}/tasks/{taskId}/terminate", jobId: string, taskId: string): TaskTerminate; - (path: "/jobs/{jobId}/tasks/{taskId}/reactivate", jobId: string, taskId: string): TaskReactivate; - (path: "/pools/{poolId}/nodes/{nodeId}/users", poolId: string, nodeId: string): ComputeNodeOperationsAddUser; - (path: "/pools/{poolId}/nodes/{nodeId}/users/{userName}", poolId: string, nodeId: string, userName: string): ComputeNodeOperationsDeleteUser; - (path: "/pools/{poolId}/nodes/{nodeId}", poolId: string, nodeId: string): ComputeNodeOperationsGet; - (path: "/pools/{poolId}/nodes/{nodeId}/reboot", poolId: string, nodeId: string): ComputeNodeOperationsReboot; - (path: "/pools/{poolId}/nodes/{nodeId}/reimage", poolId: string, nodeId: string): ComputeNodeOperationsReimage; - (path: "/pools/{poolId}/nodes/{nodeId}/disablescheduling", poolId: string, nodeId: string): ComputeNodeOperationsDisableScheduling; - (path: "/pools/{poolId}/nodes/{nodeId}/enablescheduling", poolId: string, nodeId: string): ComputeNodeOperationsEnableScheduling; - (path: "/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", poolId: string, nodeId: string): ComputeNodeOperationsGetRemoteLoginSettings; - (path: 
"/pools/{poolId}/nodes/{nodeId}/rdp", poolId: string, nodeId: string): ComputeNodeOperationsGetRemoteDesktop; - (path: "/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs", poolId: string, nodeId: string): ComputeNodeOperationsUploadBatchServiceLogs; - (path: "/pools/{poolId}/nodes", poolId: string): ComputeNodeOperationsList; - (path: "/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}", poolId: string, nodeId: string, extensionName: string): ComputeNodeExtensionOperationsGet; - (path: "/pools/{poolId}/nodes/{nodeId}/extensions", poolId: string, nodeId: string): ComputeNodeExtensionOperationsList; -} - -// @public -export interface Schedule { - doNotRunAfter?: Date | string; - doNotRunUntil?: Date | string; - recurrenceInterval?: string; - startWindow?: string; -} - -// @public -export interface ScheduleOutput { - doNotRunAfter?: string; - doNotRunUntil?: string; - recurrenceInterval?: string; - startWindow?: string; -} - -// @public -export interface StartTask { - commandLine: string; - containerSettings?: TaskContainerSettings; - environmentSettings?: Array; - maxTaskRetryCount?: number; - resourceFiles?: Array; - userIdentity?: UserIdentity; - waitForSuccess?: boolean; -} - -// @public -export interface StartTaskInformationOutput { - containerInfo?: TaskContainerExecutionInformationOutput; - endTime?: string; - exitCode?: number; - failureInfo?: TaskFailureInformationOutput; - lastRetryTime?: string; - result?: string; - retryCount: number; - startTime: string; - state: string; -} - -// @public -export interface StartTaskOutput { - commandLine: string; - containerSettings?: TaskContainerSettingsOutput; - environmentSettings?: Array; - maxTaskRetryCount?: number; - resourceFiles?: Array; - userIdentity?: UserIdentityOutput; - waitForSuccess?: boolean; -} - -// @public -export interface SubtaskInformationOutput { - containerInfo?: TaskContainerExecutionInformationOutput; - endTime?: string; - exitCode?: number; - failureInfo?: TaskFailureInformationOutput; - id?: number; - nodeInfo?: ComputeNodeInformationOutput; - previousState?: string; - previousStateTransitionTime?: string; - result?: string; - startTime?: string; - state?: string; - stateTransitionTime?: string; -} - -// @public (undocumented) -export interface TaskAdd { - get(options?: TaskListParameters): StreamableMethod; - post(options: TaskAddParameters): StreamableMethod; -} - -// @public (undocumented) -export interface TaskAdd201Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface TaskAdd201Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & TaskAdd201Headers; - // (undocumented) - status: "201"; -} - -// @public (undocumented) -export interface TaskAddBodyParam { - body: BatchTask; -} - -// @public (undocumented) -export interface TaskAddCollection { - post(options: TaskAddCollectionParameters): StreamableMethod; -} - -// @public (undocumented) -export interface TaskAddCollection200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface TaskAddCollection200Response extends HttpResponse { - // (undocumented) - body: TaskAddCollectionResultOutput; - // (undocumented) - headers: RawHttpHeaders & TaskAddCollection200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface TaskAddCollectionBodyParam { - body: BatchTaskCollection; -} - -// @public (undocumented) -export interface 
TaskAddCollectionDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskAddCollectionDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskAddCollectionDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskAddCollectionHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskAddCollectionHeaders; -} - -// @public (undocumented) -export interface TaskAddCollectionHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskAddCollectionParameters = TaskAddCollectionQueryParam & TaskAddCollectionHeaderParam & TaskAddCollectionBodyParam & RequestParameters; - -// @public (undocumented) -export interface TaskAddCollectionQueryParam { - // (undocumented) - queryParameters?: TaskAddCollectionQueryParamProperties; -} - -// @public (undocumented) -export interface TaskAddCollectionQueryParamProperties { - timeOut?: number; -} - -// @public -export interface TaskAddCollectionResultOutput { - value?: Array; -} - -// @public (undocumented) -export interface TaskAddDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskAddDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskAddDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskAddHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskAddHeaders; -} - -// @public (undocumented) -export interface TaskAddHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskAddParameters = TaskAddQueryParam & TaskAddHeaderParam & TaskAddBodyParam & RequestParameters; - -// @public (undocumented) -export interface TaskAddQueryParam { - // (undocumented) - queryParameters?: TaskAddQueryParamProperties; -} - -// @public (undocumented) -export interface TaskAddQueryParamProperties { - timeOut?: number; -} - -// @public -export interface TaskAddResultOutput { - error?: BatchErrorOutput; - eTag?: string; - lastModified?: string; - location?: string; - status: string; - taskId: string; -} - -// @public -export interface TaskConstraints { - maxTaskRetryCount?: number; - maxWallClockTime?: string; - retentionTime?: string; -} - -// @public -export interface TaskConstraintsOutput { - maxTaskRetryCount?: number; - maxWallClockTime?: string; - retentionTime?: string; -} - -// @public -export interface TaskContainerExecutionInformation { - containerId?: string; - error?: string; - state?: string; -} - -// @public -export interface TaskContainerExecutionInformationOutput { - containerId?: string; - error?: string; - state?: string; -} - -// @public -export interface TaskContainerSettings { - containerRunOptions?: string; - imageName: string; - registry?: ContainerRegistry; - workingDirectory?: string; -} - -// @public -export interface TaskContainerSettingsOutput { - containerRunOptions?: string; - imageName: string; - registry?: ContainerRegistryOutput; - workingDirectory?: string; -} - -// @public -export interface TaskCountsOutput { - active: number; - completed: number; - failed: number; - running: number; - succeeded: number; -} - -// @public -export interface TaskCountsResultOutput { - readonly 
taskCounts: TaskCountsOutput; - taskSlotCounts: TaskSlotCountsOutput; -} - -// @public (undocumented) -export interface TaskDelete { - delete(options?: TaskDeleteParameters): StreamableMethod; - get(options?: TaskGetParameters): StreamableMethod; - put(options: TaskUpdateParameters): StreamableMethod; -} - -// @public (undocumented) -export interface TaskDelete200Headers { - "client-request-id"?: string; - "request-id"?: string; -} - -// @public -export interface TaskDelete200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & TaskDelete200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface TaskDeleteDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskDeleteDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskDeleteDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskDeleteHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskDeleteHeaders; -} - -// @public (undocumented) -export interface TaskDeleteHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskDeleteParameters = TaskDeleteQueryParam & TaskDeleteHeaderParam & RequestParameters; - -// @public (undocumented) -export interface TaskDeleteQueryParam { - // (undocumented) - queryParameters?: TaskDeleteQueryParamProperties; -} - -// @public (undocumented) -export interface TaskDeleteQueryParamProperties { - timeOut?: number; -} - -// @public -export interface TaskDependencies { - taskIdRanges?: Array; - taskIds?: string[]; -} - -// @public -export interface TaskDependenciesOutput { - taskIdRanges?: Array; - taskIds?: string[]; -} - -// @public -export interface TaskExecutionInformation { - containerInfo?: TaskContainerExecutionInformation; - endTime?: Date | string; - exitCode?: number; - failureInfo?: TaskFailureInformation; - lastRequeueTime?: Date | string; - lastRetryTime?: Date | string; - requeueCount: number; - result?: string; - retryCount: number; - startTime?: Date | string; -} - -// @public -export interface TaskExecutionInformationOutput { - containerInfo?: TaskContainerExecutionInformationOutput; - endTime?: string; - exitCode?: number; - failureInfo?: TaskFailureInformationOutput; - lastRequeueTime?: string; - lastRetryTime?: string; - requeueCount: number; - result?: string; - retryCount: number; - startTime?: string; -} - -// @public -export interface TaskFailureInformation { - category: string; - code?: string; - details?: Array; - message?: string; -} - -// @public -export interface TaskFailureInformationOutput { - category: string; - code?: string; - details?: Array; - message?: string; -} - -// @public (undocumented) -export interface TaskGet200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface TaskGet200Response extends HttpResponse { - // (undocumented) - body: BatchTaskOutput; - // (undocumented) - headers: RawHttpHeaders & TaskGet200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface TaskGetDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) 
-export interface TaskGetDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskGetDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskGetHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskGetHeaders; -} - -// @public (undocumented) -export interface TaskGetHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskGetParameters = TaskGetQueryParam & TaskGetHeaderParam & RequestParameters; - -// @public (undocumented) -export interface TaskGetQueryParam { - // (undocumented) - queryParameters?: TaskGetQueryParamProperties; -} - -// @public (undocumented) -export interface TaskGetQueryParamProperties { - $expand?: string; - $select?: string; - timeOut?: number; -} - -// @public -export interface TaskIdRange { - end: number; - start: number; -} - -// @public -export interface TaskIdRangeOutput { - end: number; - start: number; -} - -// @public -export interface TaskInformationOutput { - executionInfo?: TaskExecutionInformationOutput; - jobId?: string; - subtaskId?: number; - taskId?: string; - taskState: string; - taskUrl?: string; -} - -// @public (undocumented) -export interface TaskList200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface TaskList200Response extends HttpResponse { - // (undocumented) - body: BatchTaskListResultOutput; - // (undocumented) - headers: RawHttpHeaders & TaskList200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface TaskListDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskListDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskListDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskListHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskListHeaders; -} - -// @public (undocumented) -export interface TaskListHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskListParameters = TaskListQueryParam & TaskListHeaderParam & RequestParameters; - -// @public (undocumented) -export interface TaskListQueryParam { - // (undocumented) - queryParameters?: TaskListQueryParamProperties; -} - -// @public (undocumented) -export interface TaskListQueryParamProperties { - $expand?: string; - $filter?: string; - $select?: string; - maxresults?: number; - timeOut?: number; -} - -// @public (undocumented) -export interface TaskListSubtasks { - get(options?: TaskListSubtasksParameters): StreamableMethod; -} - -// @public (undocumented) -export interface TaskListSubtasks200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - etag?: string; -} - -// @public -export interface TaskListSubtasks200Response extends HttpResponse { - // (undocumented) - body: BatchTaskListSubtasksResultOutput; - // (undocumented) - headers: RawHttpHeaders & TaskListSubtasks200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export 
interface TaskListSubtasksDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskListSubtasksDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskListSubtasksDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskListSubtasksHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskListSubtasksHeaders; -} - -// @public (undocumented) -export interface TaskListSubtasksHeaders { - "client-request-id"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskListSubtasksParameters = TaskListSubtasksQueryParam & TaskListSubtasksHeaderParam & RequestParameters; - -// @public (undocumented) -export interface TaskListSubtasksQueryParam { - // (undocumented) - queryParameters?: TaskListSubtasksQueryParamProperties; -} - -// @public (undocumented) -export interface TaskListSubtasksQueryParamProperties { - $select?: string; - timeOut?: number; -} - -// @public (undocumented) -export interface TaskReactivate { - post(options?: TaskReactivateParameters): StreamableMethod; -} - -// @public (undocumented) -export interface TaskReactivate204Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface TaskReactivate204Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & TaskReactivate204Headers; - // (undocumented) - status: "204"; -} - -// @public (undocumented) -export interface TaskReactivateDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskReactivateDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskReactivateDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskReactivateHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskReactivateHeaders; -} - -// @public (undocumented) -export interface TaskReactivateHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskReactivateParameters = TaskReactivateQueryParam & TaskReactivateHeaderParam & RequestParameters; - -// @public (undocumented) -export interface TaskReactivateQueryParam { - // (undocumented) - queryParameters?: TaskReactivateQueryParamProperties; -} - -// @public (undocumented) -export interface TaskReactivateQueryParamProperties { - timeOut?: number; -} - -// @public -export interface TaskSchedulingPolicy { - nodeFillType: string; -} - -// @public -export interface TaskSchedulingPolicyOutput { - nodeFillType: string; -} - -// @public -export interface TaskSlotCountsOutput { - active: number; - completed: number; - failed: number; - running: number; - succeeded: number; -} - -// @public -export interface TaskStatistics { - kernelCPUTime: string; - lastUpdateTime: Date | string; - readIOGiB: number; - readIOps: number; - startTime: Date | string; - url: string; - userCPUTime: string; - waitTime: string; - wallClockTime: string; - writeIOGiB: number; - writeIOps: number; -} - -// @public -export interface TaskStatisticsOutput { - kernelCPUTime: 
string; - lastUpdateTime: string; - readIOGiB: number; - readIOps: number; - startTime: string; - url: string; - userCPUTime: string; - waitTime: string; - wallClockTime: string; - writeIOGiB: number; - writeIOps: number; -} - -// @public (undocumented) -export interface TaskTerminate { - post(options?: TaskTerminateParameters): StreamableMethod; -} - -// @public (undocumented) -export interface TaskTerminate204Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface TaskTerminate204Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & TaskTerminate204Headers; - // (undocumented) - status: "204"; -} - -// @public (undocumented) -export interface TaskTerminateDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskTerminateDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskTerminateDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskTerminateHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskTerminateHeaders; -} - -// @public (undocumented) -export interface TaskTerminateHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskTerminateParameters = TaskTerminateQueryParam & TaskTerminateHeaderParam & RequestParameters; - -// @public (undocumented) -export interface TaskTerminateQueryParam { - // (undocumented) - queryParameters?: TaskTerminateQueryParamProperties; -} - -// @public (undocumented) -export interface TaskTerminateQueryParamProperties { - timeOut?: number; -} - -// @public (undocumented) -export interface TaskUpdate200Headers { - "client-request-id"?: string; - "last-modified"?: string; - "request-id"?: string; - dataserviceid: string; - etag?: string; -} - -// @public -export interface TaskUpdate200Response extends HttpResponse { - // (undocumented) - headers: RawHttpHeaders & TaskUpdate200Headers; - // (undocumented) - status: "200"; -} - -// @public (undocumented) -export interface TaskUpdateBodyParam { - body: BatchTask; -} - -// @public (undocumented) -export interface TaskUpdateDefaultHeaders { - "x-ms-error-code"?: string; -} - -// @public (undocumented) -export interface TaskUpdateDefaultResponse extends HttpResponse { - // (undocumented) - body: ErrorResponse; - // (undocumented) - headers: RawHttpHeaders & TaskUpdateDefaultHeaders; - // (undocumented) - status: string; -} - -// @public (undocumented) -export interface TaskUpdateHeaderParam { - // (undocumented) - headers?: RawHttpHeadersInput & TaskUpdateHeaders; -} - -// @public (undocumented) -export interface TaskUpdateHeaders { - "client-request-id"?: string; - "if-match"?: string; - "if-modified-since"?: string; - "if-none-match"?: string; - "if-unmodified-since"?: string; - "ocp-date"?: string; - "return-client-request-id"?: boolean; -} - -// @public (undocumented) -export type TaskUpdateParameters = TaskUpdateQueryParam & TaskUpdateHeaderParam & TaskUpdateBodyParam & RequestParameters; - -// @public (undocumented) -export interface TaskUpdateQueryParam { - // (undocumented) - queryParameters?: TaskUpdateQueryParamProperties; -} - -// @public (undocumented) 
-export interface TaskUpdateQueryParamProperties { - timeOut?: number; -} - -// @public -export interface UploadBatchServiceLogsConfiguration { - containerUrl: string; - endTime?: Date | string; - identityReference?: object; - startTime: Date | string; -} - -// @public -export interface UploadBatchServiceLogsResultOutput { - numberOfFilesUploaded: number; - readonly virtualDirectoryName: string; -} - -// @public -export interface UsageStatistics { - dedicatedCoreTime: string; - lastUpdateTime: Date | string; - startTime: Date | string; -} - -// @public -export interface UsageStatisticsOutput { - dedicatedCoreTime: string; - lastUpdateTime: string; - startTime: string; -} - -// @public -export interface UserAccount { - elevationLevel?: string; - linuxUserConfiguration?: LinuxUserConfiguration; - name: string; - password: string; - windowsUserConfiguration?: WindowsUserConfiguration; -} - -// @public -export interface UserAccountOutput { - elevationLevel?: string; - linuxUserConfiguration?: LinuxUserConfigurationOutput; - name: string; - password: string; - windowsUserConfiguration?: WindowsUserConfigurationOutput; -} - -// @public -export interface UserAssignedIdentity { - resourceId: string; -} - -// @public -export interface UserAssignedIdentityOutput { - readonly clientId?: string; - readonly principalId?: string; - resourceId: string; -} - -// @public -export interface UserIdentity { - autoUser?: AutoUserSpecification; - username?: string; -} - -// @public -export interface UserIdentityOutput { - autoUser?: AutoUserSpecificationOutput; - username?: string; -} - -// @public -export interface VirtualMachineConfiguration { - containerConfiguration?: ContainerConfiguration; - dataDisks?: Array; - diskEncryptionConfiguration?: DiskEncryptionConfiguration; - extensions?: Array; - imageReference: ImageReference; - licenseType?: string; - nodeAgentSKUId: string; - nodePlacementConfiguration?: NodePlacementConfiguration; - osDisk?: OSDisk; - windowsConfiguration?: WindowsConfiguration; -} - -// @public -export interface VirtualMachineConfigurationOutput { - containerConfiguration?: ContainerConfigurationOutput; - dataDisks?: Array; - diskEncryptionConfiguration?: DiskEncryptionConfigurationOutput; - extensions?: Array; - imageReference: ImageReferenceOutput; - licenseType?: string; - nodeAgentSKUId: string; - nodePlacementConfiguration?: NodePlacementConfigurationOutput; - osDisk?: OSDiskOutput; - windowsConfiguration?: WindowsConfigurationOutput; -} - -// @public -export interface VirtualMachineInfoOutput { - imageReference?: ImageReferenceOutput; -} - -// @public -export interface VMExtension { - autoUpgradeMinorVersion?: boolean; - name: string; - protectedSettings?: Object_2; - provisionAfterExtensions?: string[]; - publisher: string; - settings?: Object_2; - type: string; - typeHandlerVersion?: string; -} - -// @public -export interface VMExtensionInstanceViewOutput { - name?: string; - statuses?: Array; - subStatuses?: Array; -} - -// @public -export interface VMExtensionOutput { - autoUpgradeMinorVersion?: boolean; - name: string; - protectedSettings?: ObjectOutput; - provisionAfterExtensions?: string[]; - publisher: string; - settings?: ObjectOutput; - type: string; - typeHandlerVersion?: string; -} - -// @public -export interface WindowsConfiguration { - enableAutomaticUpdates?: boolean; -} - -// @public -export interface WindowsConfigurationOutput { - enableAutomaticUpdates?: boolean; -} - -// @public -export interface WindowsUserConfiguration { - loginMode?: string; -} - -// @public 
-export interface WindowsUserConfigurationOutput { - loginMode?: string; -} - -// (No @packageDocumentation comment for this package) - -``` diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/clientDefinitions.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/clientDefinitions.ts deleted file mode 100644 index b9e08e7820..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/clientDefinitions.ts +++ /dev/null @@ -1,1374 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { - ApplicationOperationsListParameters, - ApplicationOperationsGetParameters, - PoolListUsageMetricsParameters, - PoolGetAllLifetimeStatisticsParameters, - PoolAddParameters, - PoolListParameters, - PoolDeleteParameters, - PoolExistsParameters, - PoolGetParameters, - PoolPatchParameters, - PoolDisableAutoScaleParameters, - PoolEnableAutoScaleParameters, - PoolEvaluateAutoScaleParameters, - PoolResizeParameters, - PoolStopResizeParameters, - PoolUpdatePropertiesParameters, - PoolRemoveNodesParameters, - AccountListSupportedImagesParameters, - AccountListPoolNodeCountsParameters, - JobGetAllLifetimeStatisticsParameters, - JobDeleteParameters, - JobGetParameters, - JobPatchParameters, - JobUpdateParameters, - JobDisableParameters, - JobEnableParameters, - JobTerminateParameters, - JobAddParameters, - JobListParameters, - JobListFromJobScheduleParameters, - JobListPreparationAndReleaseTaskStatusParameters, - JobGetTaskCountsParameters, - CertificateOperationsAddParameters, - CertificateOperationsListParameters, - CertificateOperationsCancelDeletionParameters, - CertificateOperationsDeleteParameters, - CertificateOperationsGetParameters, - FileDeleteFromTaskParameters, - FileGetFromTaskParameters, - FileGetPropertiesFromTaskParameters, - FileDeleteFromComputeNodeParameters, - FileGetFromComputeNodeParameters, - FileGetPropertiesFromComputeNodeParameters, - FileListFromTaskParameters, - FileListFromComputeNodeParameters, - JobScheduleExistsParameters, - JobScheduleDeleteParameters, - JobScheduleGetParameters, - JobSchedulePatchParameters, - JobScheduleUpdateParameters, - JobScheduleDisableParameters, - JobScheduleEnableParameters, - JobScheduleTerminateParameters, - JobScheduleAddParameters, - JobScheduleListParameters, - TaskAddParameters, - TaskListParameters, - TaskAddCollectionParameters, - TaskDeleteParameters, - TaskGetParameters, - TaskUpdateParameters, - TaskListSubtasksParameters, - TaskTerminateParameters, - TaskReactivateParameters, - ComputeNodeOperationsAddUserParameters, - ComputeNodeOperationsDeleteUserParameters, - ComputeNodeOperationsUpdateUserParameters, - ComputeNodeOperationsGetParameters, - ComputeNodeOperationsRebootParameters, - ComputeNodeOperationsReimageParameters, - ComputeNodeOperationsDisableSchedulingParameters, - ComputeNodeOperationsEnableSchedulingParameters, - ComputeNodeOperationsGetRemoteLoginSettingsParameters, - ComputeNodeOperationsGetRemoteDesktopParameters, - ComputeNodeOperationsUploadBatchServiceLogsParameters, - ComputeNodeOperationsListParameters, - ComputeNodeExtensionOperationsGetParameters, - ComputeNodeExtensionOperationsListParameters, -} from "./parameters"; -import { - ApplicationOperationsList200Response, - ApplicationOperationsListDefaultResponse, - ApplicationOperationsGet200Response, - ApplicationOperationsGetDefaultResponse, - PoolListUsageMetrics200Response, - PoolListUsageMetricsDefaultResponse, - PoolGetAllLifetimeStatistics200Response, - 
PoolGetAllLifetimeStatisticsDefaultResponse, - PoolAdd201Response, - PoolAddDefaultResponse, - PoolList200Response, - PoolListDefaultResponse, - PoolDelete202Response, - PoolDeleteDefaultResponse, - PoolExists404Response, - PoolExistsDefaultResponse, - PoolGet200Response, - PoolGetDefaultResponse, - PoolPatch200Response, - PoolPatchDefaultResponse, - PoolDisableAutoScale200Response, - PoolDisableAutoScaleDefaultResponse, - PoolEnableAutoScale200Response, - PoolEnableAutoScaleDefaultResponse, - PoolEvaluateAutoScale200Response, - PoolEvaluateAutoScaleDefaultResponse, - PoolResize200Response, - PoolResizeDefaultResponse, - PoolStopResize200Response, - PoolStopResizeDefaultResponse, - PoolUpdateProperties200Response, - PoolUpdatePropertiesDefaultResponse, - PoolRemoveNodes200Response, - PoolRemoveNodesDefaultResponse, - AccountListSupportedImages200Response, - AccountListSupportedImagesDefaultResponse, - AccountListPoolNodeCounts200Response, - AccountListPoolNodeCountsDefaultResponse, - JobGetAllLifetimeStatistics200Response, - JobGetAllLifetimeStatisticsDefaultResponse, - JobDelete202Response, - JobDeleteDefaultResponse, - JobGet200Response, - JobGetDefaultResponse, - JobPatch200Response, - JobPatchDefaultResponse, - JobUpdate200Response, - JobUpdateDefaultResponse, - JobDisable202Response, - JobDisableDefaultResponse, - JobEnable202Response, - JobEnableDefaultResponse, - JobTerminate202Response, - JobTerminateDefaultResponse, - JobAdd201Response, - JobAddDefaultResponse, - JobList200Response, - JobListDefaultResponse, - JobListFromJobSchedule200Response, - JobListFromJobScheduleDefaultResponse, - JobListPreparationAndReleaseTaskStatus200Response, - JobListPreparationAndReleaseTaskStatusDefaultResponse, - JobGetTaskCounts200Response, - JobGetTaskCountsDefaultResponse, - CertificateOperationsAdd201Response, - CertificateOperationsAddDefaultResponse, - CertificateOperationsList200Response, - CertificateOperationsListDefaultResponse, - CertificateOperationsCancelDeletion204Response, - CertificateOperationsCancelDeletionDefaultResponse, - CertificateOperationsDelete202Response, - CertificateOperationsDeleteDefaultResponse, - CertificateOperationsGet200Response, - CertificateOperationsGetDefaultResponse, - FileDeleteFromTask200Response, - FileDeleteFromTaskDefaultResponse, - FileGetFromTask200Response, - FileGetFromTaskDefaultResponse, - FileGetPropertiesFromTask200Response, - FileGetPropertiesFromTaskDefaultResponse, - FileDeleteFromComputeNode200Response, - FileDeleteFromComputeNodeDefaultResponse, - FileGetFromComputeNode200Response, - FileGetFromComputeNodeDefaultResponse, - FileGetPropertiesFromComputeNode200Response, - FileGetPropertiesFromComputeNodeDefaultResponse, - FileListFromTask200Response, - FileListFromTaskDefaultResponse, - FileListFromComputeNode200Response, - FileListFromComputeNodeDefaultResponse, - JobScheduleExists200Response, - JobScheduleExists204Response, - JobScheduleExistsDefaultResponse, - JobScheduleDelete202Response, - JobScheduleDeleteDefaultResponse, - JobScheduleGet200Response, - JobScheduleGetDefaultResponse, - JobSchedulePatch200Response, - JobSchedulePatchDefaultResponse, - JobScheduleUpdate200Response, - JobScheduleUpdateDefaultResponse, - JobScheduleDisable204Response, - JobScheduleDisableDefaultResponse, - JobScheduleEnable204Response, - JobScheduleEnableDefaultResponse, - JobScheduleTerminate202Response, - JobScheduleTerminateDefaultResponse, - JobScheduleAdd201Response, - JobScheduleAddDefaultResponse, - JobScheduleList200Response, - 
JobScheduleListDefaultResponse, - TaskAdd201Response, - TaskAddDefaultResponse, - TaskList200Response, - TaskListDefaultResponse, - TaskAddCollection200Response, - TaskAddCollectionDefaultResponse, - TaskDelete200Response, - TaskDeleteDefaultResponse, - TaskGet200Response, - TaskGetDefaultResponse, - TaskUpdate200Response, - TaskUpdateDefaultResponse, - TaskListSubtasks200Response, - TaskListSubtasksDefaultResponse, - TaskTerminate204Response, - TaskTerminateDefaultResponse, - TaskReactivate204Response, - TaskReactivateDefaultResponse, - ComputeNodeOperationsAddUser201Response, - ComputeNodeOperationsAddUserDefaultResponse, - ComputeNodeOperationsDeleteUser200Response, - ComputeNodeOperationsDeleteUserDefaultResponse, - ComputeNodeOperationsUpdateUser200Response, - ComputeNodeOperationsUpdateUserDefaultResponse, - ComputeNodeOperationsGet200Response, - ComputeNodeOperationsGetDefaultResponse, - ComputeNodeOperationsReboot202Response, - ComputeNodeOperationsRebootDefaultResponse, - ComputeNodeOperationsReimage202Response, - ComputeNodeOperationsReimageDefaultResponse, - ComputeNodeOperationsDisableScheduling200Response, - ComputeNodeOperationsDisableSchedulingDefaultResponse, - ComputeNodeOperationsEnableScheduling200Response, - ComputeNodeOperationsEnableSchedulingDefaultResponse, - ComputeNodeOperationsGetRemoteLoginSettings200Response, - ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse, - ComputeNodeOperationsGetRemoteDesktop200Response, - ComputeNodeOperationsGetRemoteDesktopDefaultResponse, - ComputeNodeOperationsUploadBatchServiceLogs200Response, - ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse, - ComputeNodeOperationsList200Response, - ComputeNodeOperationsListDefaultResponse, - ComputeNodeExtensionOperationsGet200Response, - ComputeNodeExtensionOperationsGetDefaultResponse, - ComputeNodeExtensionOperationsList200Response, - ComputeNodeExtensionOperationsListDefaultResponse, -} from "./responses"; -import { Client, StreamableMethod } from "@azure-rest/core-client"; - -export interface ApplicationOperationsList { - /** - * This operation returns only Applications and versions that are available for - * use on Compute Nodes; that is, that can be used in an Package reference. For - * administrator information about applications and versions that are not yet - * available to Compute Nodes, use the Azure portal or the Azure Resource Manager - * API. - */ - get( - options?: ApplicationOperationsListParameters - ): StreamableMethod< - | ApplicationOperationsList200Response - | ApplicationOperationsListDefaultResponse - >; -} - -export interface ApplicationOperationsGet { - /** - * This operation returns only Applications and versions that are available for - * use on Compute Nodes; that is, that can be used in an Package reference. For - * administrator information about Applications and versions that are not yet - * available to Compute Nodes, use the Azure portal or the Azure Resource Manager - * API. - */ - get( - options?: ApplicationOperationsGetParameters - ): StreamableMethod< - | ApplicationOperationsGet200Response - | ApplicationOperationsGetDefaultResponse - >; -} - -export interface PoolListUsageMetrics { - /** - * If you do not specify a $filter clause including a poolId, the response - * includes all Pools that existed in the Account in the time range of the - * returned aggregation intervals. 
If you do not specify a $filter clause - * including a startTime or endTime these filters default to the start and end - * times of the last aggregation interval currently available; that is, only the - * last aggregation interval is returned. - */ - get( - options?: PoolListUsageMetricsParameters - ): StreamableMethod< - PoolListUsageMetrics200Response | PoolListUsageMetricsDefaultResponse - >; -} - -export interface PoolGetAllLifetimeStatistics { - /** - * Statistics are aggregated across all Pools that have ever existed in the - * Account, from Account creation to the last update time of the statistics. The - * statistics may not be immediately available. The Batch service performs - * periodic roll-up of statistics. The typical delay is about 30 minutes. - */ - get( - options?: PoolGetAllLifetimeStatisticsParameters - ): StreamableMethod< - | PoolGetAllLifetimeStatistics200Response - | PoolGetAllLifetimeStatisticsDefaultResponse - >; -} - -export interface PoolAdd { - /** - * When naming Pools, avoid including sensitive information such as user names or - * secret project names. This information may appear in telemetry logs accessible - * to Microsoft Support engineers. - */ - post( - options: PoolAddParameters - ): StreamableMethod; - /** Lists all of the Pools in the specified Account. */ - get( - options?: PoolListParameters - ): StreamableMethod; -} - -export interface PoolDelete { - /** - * When you request that a Pool be deleted, the following actions occur: the Pool - * state is set to deleting; any ongoing resize operation on the Pool are stopped; - * the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks - * running on existing Compute Nodes are terminated and requeued (as if a resize - * Pool operation had been requested with the default requeue option); finally, - * the Pool is removed from the system. Because running Tasks are requeued, the - * user can rerun these Tasks by updating their Job to target a different Pool. - * The Tasks can then run on the new Pool. If you want to override the requeue - * behavior, then you should call resize Pool explicitly to shrink the Pool to - * zero size before deleting the Pool. If you call an Update, Patch or Delete API - * on a Pool in the deleting state, it will fail with HTTP status code 409 with - * error code PoolBeingDeleted. - */ - delete( - options?: PoolDeleteParameters - ): StreamableMethod; - /** Gets basic properties of a Pool. */ - head( - options?: PoolExistsParameters - ): StreamableMethod; - /** Gets information about the specified Pool. */ - get( - options: PoolGetParameters - ): StreamableMethod; - /** - * This only replaces the Pool properties specified in the request. For example, - * if the Pool has a StartTask associated with it, and a request does not specify - * a StartTask element, then the Pool keeps the existing StartTask. - */ - patch( - options: PoolPatchParameters - ): StreamableMethod; -} - -export interface PoolDisableAutoScale { - /** Disables automatic scaling for a Pool. */ - post( - options?: PoolDisableAutoScaleParameters - ): StreamableMethod< - PoolDisableAutoScale200Response | PoolDisableAutoScaleDefaultResponse - >; -} - -export interface PoolEnableAutoScale { - /** - * You cannot enable automatic scaling on a Pool if a resize operation is in - * progress on the Pool. If automatic scaling of the Pool is currently disabled, - * you must specify a valid autoscale formula as part of the request. 
If automatic - * scaling of the Pool is already enabled, you may specify a new autoscale formula - * and/or a new evaluation interval. You cannot call this API for the same Pool - * more than once every 30 seconds. - */ - post( - options: PoolEnableAutoScaleParameters - ): StreamableMethod< - PoolEnableAutoScale200Response | PoolEnableAutoScaleDefaultResponse - >; -} - -export interface PoolEvaluateAutoScale { - /** - * This API is primarily for validating an autoscale formula, as it simply returns - * the result without applying the formula to the Pool. The Pool must have auto - * scaling enabled in order to evaluate a formula. - */ - post( - options: PoolEvaluateAutoScaleParameters - ): StreamableMethod< - PoolEvaluateAutoScale200Response | PoolEvaluateAutoScaleDefaultResponse - >; -} - -export interface PoolResize { - /** - * You can only resize a Pool when its allocation state is steady. If the Pool is - * already resizing, the request fails with status code 409. When you resize a - * Pool, the Pool's allocation state changes from steady to resizing. You cannot - * resize Pools which are configured for automatic scaling. If you try to do this, - * the Batch service returns an error 409. If you resize a Pool downwards, the - * Batch service chooses which Compute Nodes to remove. To remove specific Compute - * Nodes, use the Pool remove Compute Nodes API instead. - */ - post( - options: PoolResizeParameters - ): StreamableMethod; -} - -export interface PoolStopResize { - /** - * This does not restore the Pool to its previous state before the resize - * operation: it only stops any further changes being made, and the Pool maintains - * its current state. After stopping, the Pool stabilizes at the number of Compute - * Nodes it was at when the stop operation was done. During the stop operation, - * the Pool allocation state changes first to stopping and then to steady. A - * resize operation need not be an explicit resize Pool request; this API can also - * be used to halt the initial sizing of the Pool when it is created. - */ - post( - options?: PoolStopResizeParameters - ): StreamableMethod< - PoolStopResize200Response | PoolStopResizeDefaultResponse - >; -} - -export interface PoolUpdateProperties { - /** - * This fully replaces all the updatable properties of the Pool. For example, if - * the Pool has a StartTask associated with it and if StartTask is not specified - * with this request, then the Batch service will remove the existing StartTask. - */ - post( - options: PoolUpdatePropertiesParameters - ): StreamableMethod< - PoolUpdateProperties200Response | PoolUpdatePropertiesDefaultResponse - >; -} - -export interface PoolRemoveNodes { - /** - * This operation can only run when the allocation state of the Pool is steady. - * When this operation runs, the allocation state changes from steady to resizing. - * Each request may remove up to 100 nodes. - */ - post( - options: PoolRemoveNodesParameters - ): StreamableMethod< - PoolRemoveNodes200Response | PoolRemoveNodesDefaultResponse - >; -} - -export interface AccountListSupportedImages { - /** Lists all Virtual Machine Images supported by the Azure Batch service. */ - get( - options?: AccountListSupportedImagesParameters - ): StreamableMethod< - | AccountListSupportedImages200Response - | AccountListSupportedImagesDefaultResponse - >; -} - -export interface AccountListPoolNodeCounts { - /** - * Gets the number of Compute Nodes in each state, grouped by Pool. Note that the - * numbers returned may not always be up to date. 
If you need exact node counts, - * use a list query. - */ - get( - options?: AccountListPoolNodeCountsParameters - ): StreamableMethod< - | AccountListPoolNodeCounts200Response - | AccountListPoolNodeCountsDefaultResponse - >; -} - -export interface JobGetAllLifetimeStatistics { - /** - * Statistics are aggregated across all Jobs that have ever existed in the - * Account, from Account creation to the last update time of the statistics. The - * statistics may not be immediately available. The Batch service performs - * periodic roll-up of statistics. The typical delay is about 30 minutes. - */ - get( - options?: JobGetAllLifetimeStatisticsParameters - ): StreamableMethod< - | JobGetAllLifetimeStatistics200Response - | JobGetAllLifetimeStatisticsDefaultResponse - >; -} - -export interface JobDelete { - /** - * Deleting a Job also deletes all Tasks that are part of that Job, and all Job - * statistics. This also overrides the retention period for Task data; that is, if - * the Job contains Tasks which are still retained on Compute Nodes, the Batch - * services deletes those Tasks' working directories and all their contents. When - * a Delete Job request is received, the Batch service sets the Job to the - * deleting state. All update operations on a Job that is in deleting state will - * fail with status code 409 (Conflict), with additional information indicating - * that the Job is being deleted. - */ - delete( - options?: JobDeleteParameters - ): StreamableMethod; - /** Gets information about the specified Job. */ - get( - options?: JobGetParameters - ): StreamableMethod; - /** - * This replaces only the Job properties specified in the request. For example, if - * the Job has constraints, and a request does not specify the constraints - * element, then the Job keeps the existing constraints. - */ - patch( - options: JobPatchParameters - ): StreamableMethod; - /** - * This fully replaces all the updatable properties of the Job. For example, if - * the Job has constraints associated with it and if constraints is not specified - * with this request, then the Batch service will remove the existing constraints. - */ - put( - options: JobUpdateParameters - ): StreamableMethod; -} - -export interface JobDisable { - /** - * The Batch Service immediately moves the Job to the disabling state. Batch then - * uses the disableTasks parameter to determine what to do with the currently - * running Tasks of the Job. The Job remains in the disabling state until the - * disable operation is completed and all Tasks have been dealt with according to - * the disableTasks option; the Job then moves to the disabled state. No new Tasks - * are started under the Job until it moves back to active state. If you try to - * disable a Job that is in any state other than active, disabling, or disabled, - * the request fails with status code 409. - */ - post( - options: JobDisableParameters - ): StreamableMethod; -} - -export interface JobEnable { - /** - * When you call this API, the Batch service sets a disabled Job to the enabling - * state. After the this operation is completed, the Job moves to the active - * state, and scheduling of new Tasks under the Job resumes. The Batch service - * does not allow a Task to remain in the active state for more than 180 days. - * Therefore, if you enable a Job containing active Tasks which were added more - * than 180 days ago, those Tasks will not run. 
- */ - post( - options?: JobEnableParameters - ): StreamableMethod; -} - -export interface JobTerminate { - /** - * When a Terminate Job request is received, the Batch service sets the Job to the - * terminating state. The Batch service then terminates any running Tasks - * associated with the Job and runs any required Job release Tasks. Then the Job - * moves into the completed state. If there are any Tasks in the Job in the active - * state, they will remain in the active state. Once a Job is terminated, new - * Tasks cannot be added and any remaining active Tasks will not be scheduled. - */ - post( - options: JobTerminateParameters - ): StreamableMethod; -} - -export interface JobAdd { - /** - * The Batch service supports two ways to control the work done as part of a Job. - * In the first approach, the user specifies a Job Manager Task. The Batch service - * launches this Task when it is ready to start the Job. The Job Manager Task - * controls all other Tasks that run under this Job, by using the Task APIs. In - * the second approach, the user directly controls the execution of Tasks under an - * active Job, by using the Task APIs. Also note: when naming Jobs, avoid - * including sensitive information such as user names or secret project names. - * This information may appear in telemetry logs accessible to Microsoft Support - * engineers. - */ - post( - options: JobAddParameters - ): StreamableMethod; - /** Lists all of the Jobs in the specified Account. */ - get( - options: JobListParameters - ): StreamableMethod; -} - -export interface JobListFromJobSchedule { - /** Lists the Jobs that have been created under the specified Job Schedule. */ - get( - options: JobListFromJobScheduleParameters - ): StreamableMethod< - JobListFromJobSchedule200Response | JobListFromJobScheduleDefaultResponse - >; -} - -export interface JobListPreparationAndReleaseTaskStatus { - /** - * This API returns the Job Preparation and Job Release Task status on all Compute - * Nodes that have run the Job Preparation or Job Release Task. This includes - * Compute Nodes which have since been removed from the Pool. If this API is - * invoked on a Job which has no Job Preparation or Job Release Task, the Batch - * service returns HTTP status code 409 (Conflict) with an error code of - * JobPreparationTaskNotSpecified. - */ - get( - options: JobListPreparationAndReleaseTaskStatusParameters - ): StreamableMethod< - | JobListPreparationAndReleaseTaskStatus200Response - | JobListPreparationAndReleaseTaskStatusDefaultResponse - >; -} - -export interface JobGetTaskCounts { - /** - * Task counts provide a count of the Tasks by active, running or completed Task - * state, and a count of Tasks which succeeded or failed. Tasks in the preparing - * state are counted as running. Note that the numbers returned may not always be - * up to date. If you need exact task counts, use a list query. - */ - get( - options?: JobGetTaskCountsParameters - ): StreamableMethod< - JobGetTaskCounts200Response | JobGetTaskCountsDefaultResponse - >; -} - -export interface CertificateOperationsAdd { - /** Adds a Certificate to the specified Account. */ - post( - options: CertificateOperationsAddParameters - ): StreamableMethod< - | CertificateOperationsAdd201Response - | CertificateOperationsAddDefaultResponse - >; - /** Lists all of the Certificates that have been added to the specified Account. 
*/ - get( - options: CertificateOperationsListParameters - ): StreamableMethod< - | CertificateOperationsList200Response - | CertificateOperationsListDefaultResponse - >; -} - -export interface CertificateOperationsCancelDeletion { - /** - * If you try to delete a Certificate that is being used by a Pool or Compute - * Node, the status of the Certificate changes to deleteFailed. If you decide that - * you want to continue using the Certificate, you can use this operation to set - * the status of the Certificate back to active. If you intend to delete the - * Certificate, you do not need to run this operation after the deletion failed. - * You must make sure that the Certificate is not being used by any resources, and - * then you can try again to delete the Certificate. - */ - post( - options?: CertificateOperationsCancelDeletionParameters - ): StreamableMethod< - | CertificateOperationsCancelDeletion204Response - | CertificateOperationsCancelDeletionDefaultResponse - >; -} - -export interface CertificateOperationsDelete { - /** - * You cannot delete a Certificate if a resource (Pool or Compute Node) is using - * it. Before you can delete a Certificate, you must therefore make sure that the - * Certificate is not associated with any existing Pools, the Certificate is not - * installed on any Nodes (even if you remove a Certificate from a Pool, it is not - * removed from existing Compute Nodes in that Pool until they restart), and no - * running Tasks depend on the Certificate. If you try to delete a Certificate - * that is in use, the deletion fails. The Certificate status changes to - * deleteFailed. You can use Cancel Delete Certificate to set the status back to - * active if you decide that you want to continue using the Certificate. - */ - delete( - options?: CertificateOperationsDeleteParameters - ): StreamableMethod< - | CertificateOperationsDelete202Response - | CertificateOperationsDeleteDefaultResponse - >; - /** Gets information about the specified Certificate. */ - get( - options: CertificateOperationsGetParameters - ): StreamableMethod< - | CertificateOperationsGet200Response - | CertificateOperationsGetDefaultResponse - >; -} - -export interface FileDeleteFromTask { - /** Deletes the specified Task file from the Compute Node where the Task ran. */ - delete( - options: FileDeleteFromTaskParameters - ): StreamableMethod< - FileDeleteFromTask200Response | FileDeleteFromTaskDefaultResponse - >; - /** Returns the content of the specified Task file. */ - get( - options?: FileGetFromTaskParameters - ): StreamableMethod< - FileGetFromTask200Response | FileGetFromTaskDefaultResponse - >; - /** Gets the properties of the specified Task file. */ - head( - options?: FileGetPropertiesFromTaskParameters - ): StreamableMethod< - | FileGetPropertiesFromTask200Response - | FileGetPropertiesFromTaskDefaultResponse - >; -} - -export interface FileDeleteFromComputeNode { - /** Deletes the specified file from the Compute Node. */ - delete( - options?: FileDeleteFromComputeNodeParameters - ): StreamableMethod< - | FileDeleteFromComputeNode200Response - | FileDeleteFromComputeNodeDefaultResponse - >; - /** Returns the content of the specified Compute Node file. */ - get( - options?: FileGetFromComputeNodeParameters - ): StreamableMethod< - FileGetFromComputeNode200Response | FileGetFromComputeNodeDefaultResponse - >; - /** Gets the properties of the specified Compute Node file. 
*/ - head( - options?: FileGetPropertiesFromComputeNodeParameters - ): StreamableMethod< - | FileGetPropertiesFromComputeNode200Response - | FileGetPropertiesFromComputeNodeDefaultResponse - >; -} - -export interface FileListFromTask { - /** Lists the files in a Task's directory on its Compute Node. */ - get( - options: FileListFromTaskParameters - ): StreamableMethod< - FileListFromTask200Response | FileListFromTaskDefaultResponse - >; -} - -export interface FileListFromComputeNode { - /** Lists all of the files in Task directories on the specified Compute Node. */ - get( - options: FileListFromComputeNodeParameters - ): StreamableMethod< - FileListFromComputeNode200Response | FileListFromComputeNodeDefaultResponse - >; -} - -export interface JobScheduleExists { - /** Checks the specified Job Schedule exists. */ - head( - options?: JobScheduleExistsParameters - ): StreamableMethod< - | JobScheduleExists200Response - | JobScheduleExists204Response - | JobScheduleExistsDefaultResponse - >; - /** - * When you delete a Job Schedule, this also deletes all Jobs and Tasks under that - * schedule. When Tasks are deleted, all the files in their working directories on - * the Compute Nodes are also deleted (the retention period is ignored). The Job - * Schedule statistics are no longer accessible once the Job Schedule is deleted, - * though they are still counted towards Account lifetime statistics. - */ - delete( - options?: JobScheduleDeleteParameters - ): StreamableMethod< - JobScheduleDelete202Response | JobScheduleDeleteDefaultResponse - >; - /** Gets information about the specified Job Schedule. */ - get( - options?: JobScheduleGetParameters - ): StreamableMethod< - JobScheduleGet200Response | JobScheduleGetDefaultResponse - >; - /** - * This replaces only the Job Schedule properties specified in the request. For - * example, if the schedule property is not specified with this request, then the - * Batch service will keep the existing schedule. Changes to a Job Schedule only - * impact Jobs created by the schedule after the update has taken place; currently - * running Jobs are unaffected. - */ - patch( - options: JobSchedulePatchParameters - ): StreamableMethod< - JobSchedulePatch200Response | JobSchedulePatchDefaultResponse - >; - /** - * This fully replaces all the updatable properties of the Job Schedule. For - * example, if the schedule property is not specified with this request, then the - * Batch service will remove the existing schedule. Changes to a Job Schedule only - * impact Jobs created by the schedule after the update has taken place; currently - * running Jobs are unaffected. - */ - put( - options: JobScheduleUpdateParameters - ): StreamableMethod< - JobScheduleUpdate200Response | JobScheduleUpdateDefaultResponse - >; -} - -export interface JobScheduleDisable { - /** No new Jobs will be created until the Job Schedule is enabled again. */ - post( - options?: JobScheduleDisableParameters - ): StreamableMethod< - JobScheduleDisable204Response | JobScheduleDisableDefaultResponse - >; -} - -export interface JobScheduleEnable { - /** Enables a Job Schedule. */ - post( - options?: JobScheduleEnableParameters - ): StreamableMethod< - JobScheduleEnable204Response | JobScheduleEnableDefaultResponse - >; -} - -export interface JobScheduleTerminate { - /** Terminates a Job Schedule. 
*/ - post( - options?: JobScheduleTerminateParameters - ): StreamableMethod< - JobScheduleTerminate202Response | JobScheduleTerminateDefaultResponse - >; -} - -export interface JobScheduleAdd { - /** Adds a Job Schedule to the specified Account. */ - post( - options: JobScheduleAddParameters - ): StreamableMethod< - JobScheduleAdd201Response | JobScheduleAddDefaultResponse - >; - /** Lists all of the Job Schedules in the specified Account. */ - get( - options?: JobScheduleListParameters - ): StreamableMethod< - JobScheduleList200Response | JobScheduleListDefaultResponse - >; -} - -export interface TaskAdd { - /** - * The maximum lifetime of a Task from addition to completion is 180 days. If a - * Task has not completed within 180 days of being added it will be terminated by - * the Batch service and left in whatever state it was in at that time. - */ - post( - options: TaskAddParameters - ): StreamableMethod; - /** - * For multi-instance Tasks, information such as affinityId, executionInfo and - * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - * information about subtasks. - */ - get( - options?: TaskListParameters - ): StreamableMethod; -} - -export interface TaskAddCollection { - /** - * Note that each Task must have a unique ID. The Batch service may not return the - * results for each Task in the same order the Tasks were submitted in this - * request. If the server times out or the connection is closed during the - * request, the request may have been partially or fully processed, or not at all. - * In such cases, the user should re-issue the request. Note that it is up to the - * user to correctly handle failures when re-issuing a request. For example, you - * should use the same Task IDs during a retry so that if the prior operation - * succeeded, the retry will not create extra Tasks unexpectedly. If the response - * contains any Tasks which failed to add, a client can retry the request. In a - * retry, it is most efficient to resubmit only Tasks that failed to add, and to - * omit Tasks that were successfully added on the first attempt. The maximum - * lifetime of a Task from addition to completion is 180 days. If a Task has not - * completed within 180 days of being added it will be terminated by the Batch - * service and left in whatever state it was in at that time. - */ - post( - options: TaskAddCollectionParameters - ): StreamableMethod< - TaskAddCollection200Response | TaskAddCollectionDefaultResponse - >; -} - -export interface TaskDelete { - /** - * When a Task is deleted, all of the files in its directory on the Compute Node - * where it ran are also deleted (regardless of the retention time). For - * multi-instance Tasks, the delete Task operation applies synchronously to the - * primary task; subtasks and their files are then deleted asynchronously in the - * background. - */ - delete( - options?: TaskDeleteParameters - ): StreamableMethod; - /** - * For multi-instance Tasks, information such as affinityId, executionInfo and - * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - * information about subtasks. - */ - get( - options?: TaskGetParameters - ): StreamableMethod; - /** Updates the properties of the specified Task. */ - put( - options: TaskUpdateParameters - ): StreamableMethod; -} - -export interface TaskListSubtasks { - /** If the Task is not a multi-instance Task then this returns an empty collection. 
*/ - get( - options?: TaskListSubtasksParameters - ): StreamableMethod< - TaskListSubtasks200Response | TaskListSubtasksDefaultResponse - >; -} - -export interface TaskTerminate { - /** - * When the Task has been terminated, it moves to the completed state. For - * multi-instance Tasks, the terminate Task operation applies synchronously to the - * primary task; subtasks are then terminated asynchronously in the background. - */ - post( - options?: TaskTerminateParameters - ): StreamableMethod; -} - -export interface TaskReactivate { - /** - * Reactivation makes a Task eligible to be retried again up to its maximum retry - * count. The Task's state is changed to active. As the Task is no longer in the - * completed state, any previous exit code or failure information is no longer - * available after reactivation. Each time a Task is reactivated, its retry count - * is reset to 0. Reactivation will fail for Tasks that are not completed or that - * previously completed successfully (with an exit code of 0). Additionally, it - * will fail if the Job has completed (or is terminating or deleting). - */ - post( - options?: TaskReactivateParameters - ): StreamableMethod< - TaskReactivate204Response | TaskReactivateDefaultResponse - >; -} - -export interface ComputeNodeOperationsAddUser { - /** - * You can add a user Account to a Compute Node only when it is in the idle or - * running state. - */ - post( - options: ComputeNodeOperationsAddUserParameters - ): StreamableMethod< - | ComputeNodeOperationsAddUser201Response - | ComputeNodeOperationsAddUserDefaultResponse - >; -} - -export interface ComputeNodeOperationsDeleteUser { - /** - * You can delete a user Account to a Compute Node only when it is in the idle or - * running state. - */ - delete( - options?: ComputeNodeOperationsDeleteUserParameters - ): StreamableMethod< - | ComputeNodeOperationsDeleteUser200Response - | ComputeNodeOperationsDeleteUserDefaultResponse - >; - /** - * This operation replaces of all the updatable properties of the Account. For - * example, if the expiryTime element is not specified, the current value is - * replaced with the default value, not left unmodified. You can update a user - * Account on a Compute Node only when it is in the idle or running state. - */ - put( - options: ComputeNodeOperationsUpdateUserParameters - ): StreamableMethod< - | ComputeNodeOperationsUpdateUser200Response - | ComputeNodeOperationsUpdateUserDefaultResponse - >; -} - -export interface ComputeNodeOperationsGet { - /** Gets information about the specified Compute Node. */ - get( - options?: ComputeNodeOperationsGetParameters - ): StreamableMethod< - | ComputeNodeOperationsGet200Response - | ComputeNodeOperationsGetDefaultResponse - >; -} - -export interface ComputeNodeOperationsReboot { - /** You can restart a Compute Node only if it is in an idle or running state. */ - post( - options: ComputeNodeOperationsRebootParameters - ): StreamableMethod< - | ComputeNodeOperationsReboot202Response - | ComputeNodeOperationsRebootDefaultResponse - >; -} - -export interface ComputeNodeOperationsReimage { - /** - * You can reinstall the operating system on a Compute Node only if it is in an - * idle or running state. This API can be invoked only on Pools created with the - * cloud service configuration property. 
- */ - post( - options: ComputeNodeOperationsReimageParameters - ): StreamableMethod< - | ComputeNodeOperationsReimage202Response - | ComputeNodeOperationsReimageDefaultResponse - >; -} - -export interface ComputeNodeOperationsDisableScheduling { - /** - * You can disable Task scheduling on a Compute Node only if its current - * scheduling state is enabled. - */ - post( - options: ComputeNodeOperationsDisableSchedulingParameters - ): StreamableMethod< - | ComputeNodeOperationsDisableScheduling200Response - | ComputeNodeOperationsDisableSchedulingDefaultResponse - >; -} - -export interface ComputeNodeOperationsEnableScheduling { - /** - * You can enable Task scheduling on a Compute Node only if its current scheduling - * state is disabled - */ - post( - options?: ComputeNodeOperationsEnableSchedulingParameters - ): StreamableMethod< - | ComputeNodeOperationsEnableScheduling200Response - | ComputeNodeOperationsEnableSchedulingDefaultResponse - >; -} - -export interface ComputeNodeOperationsGetRemoteLoginSettings { - /** - * Before you can remotely login to a Compute Node using the remote login - * settings, you must create a user Account on the Compute Node. This API can be - * invoked only on Pools created with the virtual machine configuration property. - * For Pools created with a cloud service configuration, see the GetRemoteDesktop - * API. - */ - get( - options?: ComputeNodeOperationsGetRemoteLoginSettingsParameters - ): StreamableMethod< - | ComputeNodeOperationsGetRemoteLoginSettings200Response - | ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse - >; -} - -export interface ComputeNodeOperationsGetRemoteDesktop { - /** - * Before you can access a Compute Node by using the RDP file, you must create a - * user Account on the Compute Node. This API can only be invoked on Pools created - * with a cloud service configuration. For Pools created with a virtual machine - * configuration, see the GetRemoteLoginSettings API. - */ - get( - options?: ComputeNodeOperationsGetRemoteDesktopParameters - ): StreamableMethod< - | ComputeNodeOperationsGetRemoteDesktop200Response - | ComputeNodeOperationsGetRemoteDesktopDefaultResponse - >; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogs { - /** - * This is for gathering Azure Batch service log files in an automated fashion - * from Compute Nodes if you are experiencing an error and wish to escalate to - * Azure support. The Azure Batch service log files should be shared with Azure - * support to aid in debugging issues with the Batch service. - */ - post( - options: ComputeNodeOperationsUploadBatchServiceLogsParameters - ): StreamableMethod< - | ComputeNodeOperationsUploadBatchServiceLogs200Response - | ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse - >; -} - -export interface ComputeNodeOperationsList { - /** Lists the Compute Nodes in the specified Pool. */ - get( - options: ComputeNodeOperationsListParameters - ): StreamableMethod< - | ComputeNodeOperationsList200Response - | ComputeNodeOperationsListDefaultResponse - >; -} - -export interface ComputeNodeExtensionOperationsGet { - /** Gets information about the specified Compute Node Extension. */ - get( - options?: ComputeNodeExtensionOperationsGetParameters - ): StreamableMethod< - | ComputeNodeExtensionOperationsGet200Response - | ComputeNodeExtensionOperationsGetDefaultResponse - >; -} - -export interface ComputeNodeExtensionOperationsList { - /** Lists the Compute Nodes Extensions in the specified Pool. 
*/ - get( - options?: ComputeNodeExtensionOperationsListParameters - ): StreamableMethod< - | ComputeNodeExtensionOperationsList200Response - | ComputeNodeExtensionOperationsListDefaultResponse - >; -} - -export interface Routes { - /** Resource for '/applications' has methods for the following verbs: get */ - (path: "/applications"): ApplicationOperationsList; - /** Resource for '/applications/\{applicationId\}' has methods for the following verbs: get */ - ( - path: "/applications/{applicationId}", - applicationId: string - ): ApplicationOperationsGet; - /** Resource for '/poolusagemetrics' has methods for the following verbs: get */ - (path: "/poolusagemetrics"): PoolListUsageMetrics; - /** Resource for '/lifetimepoolstats' has methods for the following verbs: get */ - (path: "/lifetimepoolstats"): PoolGetAllLifetimeStatistics; - /** Resource for '/pools' has methods for the following verbs: post, get */ - (path: "/pools"): PoolAdd; - /** Resource for '/pools/\{poolId\}' has methods for the following verbs: delete, head, get, patch */ - (path: "/pools/{poolId}", poolId: string): PoolDelete; - /** Resource for '/pools/\{poolId\}/disableautoscale' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/disableautoscale", - poolId: string - ): PoolDisableAutoScale; - /** Resource for '/pools/\{poolId\}/enableautoscale' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/enableautoscale", - poolId: string - ): PoolEnableAutoScale; - /** Resource for '/pools/\{poolId\}/evaluateautoscale' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/evaluateautoscale", - poolId: string - ): PoolEvaluateAutoScale; - /** Resource for '/pools/\{poolId\}/resize' has methods for the following verbs: post */ - (path: "/pools/{poolId}/resize", poolId: string): PoolResize; - /** Resource for '/pools/\{poolId\}/stopresize' has methods for the following verbs: post */ - (path: "/pools/{poolId}/stopresize", poolId: string): PoolStopResize; - /** Resource for '/pools/\{poolId\}/updateproperties' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/updateproperties", - poolId: string - ): PoolUpdateProperties; - /** Resource for '/pools/\{poolId\}/removenodes' has methods for the following verbs: post */ - (path: "/pools/{poolId}/removenodes", poolId: string): PoolRemoveNodes; - /** Resource for '/supportedimages' has methods for the following verbs: get */ - (path: "/supportedimages"): AccountListSupportedImages; - /** Resource for '/nodecounts' has methods for the following verbs: get */ - (path: "/nodecounts"): AccountListPoolNodeCounts; - /** Resource for '/lifetimejobstats' has methods for the following verbs: get */ - (path: "/lifetimejobstats"): JobGetAllLifetimeStatistics; - /** Resource for '/jobs/\{jobId\}' has methods for the following verbs: delete, get, patch, put */ - (path: "/jobs/{jobId}", jobId: string): JobDelete; - /** Resource for '/jobs/\{jobId\}/disable' has methods for the following verbs: post */ - (path: "/jobs/{jobId}/disable", jobId: string): JobDisable; - /** Resource for '/jobs/\{jobId\}/enable' has methods for the following verbs: post */ - (path: "/jobs/{jobId}/enable", jobId: string): JobEnable; - /** Resource for '/jobs/\{jobId\}/terminate' has methods for the following verbs: post */ - (path: "/jobs/{jobId}/terminate", jobId: string): JobTerminate; - /** Resource for '/jobs' has methods for the following verbs: post, get */ - (path: "/jobs"): JobAdd; - /** Resource for 
'/jobschedules/\{jobScheduleId\}/jobs' has methods for the following verbs: get */ - ( - path: "/jobschedules/{jobScheduleId}/jobs", - jobScheduleId: string - ): JobListFromJobSchedule; - /** Resource for '/jobs/\{jobId\}/jobpreparationandreleasetaskstatus' has methods for the following verbs: get */ - ( - path: "/jobs/{jobId}/jobpreparationandreleasetaskstatus", - jobId: string - ): JobListPreparationAndReleaseTaskStatus; - /** Resource for '/jobs/\{jobId\}/taskcounts' has methods for the following verbs: get */ - (path: "/jobs/{jobId}/taskcounts", jobId: string): JobGetTaskCounts; - /** Resource for '/certificates' has methods for the following verbs: post, get */ - (path: "/certificates"): CertificateOperationsAdd; - /** Resource for '/certificates(thumbprintAlgorithm=\{thumbprintAlgorithm\},thumbprint=\{thumbprint\})/canceldelete' has methods for the following verbs: post */ - ( - path: "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete", - thumbprintAlgorithm: string, - thumbprint: string - ): CertificateOperationsCancelDeletion; - /** Resource for '/certificates(thumbprintAlgorithm=\{thumbprintAlgorithm\},thumbprint=\{thumbprint\})' has methods for the following verbs: delete, get */ - ( - path: "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", - thumbprintAlgorithm: string, - thumbprint: string - ): CertificateOperationsDelete; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/files/\{filePath\}' has methods for the following verbs: delete, get, head */ - ( - path: "/jobs/{jobId}/tasks/{taskId}/files/{filePath}", - jobId: string, - taskId: string, - filePath: string - ): FileDeleteFromTask; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/files/\{filePath\}' has methods for the following verbs: delete, get, head */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/files/{filePath}", - poolId: string, - nodeId: string, - filePath: string - ): FileDeleteFromComputeNode; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/files' has methods for the following verbs: get */ - ( - path: "/jobs/{jobId}/tasks/{taskId}/files", - jobId: string, - taskId: string - ): FileListFromTask; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/files' has methods for the following verbs: get */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/files", - poolId: string, - nodeId: string - ): FileListFromComputeNode; - /** Resource for '/jobschedules/\{jobScheduleId\}' has methods for the following verbs: head, delete, get, patch, put */ - ( - path: "/jobschedules/{jobScheduleId}", - jobScheduleId: string - ): JobScheduleExists; - /** Resource for '/jobschedules/\{jobScheduleId\}/disable' has methods for the following verbs: post */ - ( - path: "/jobschedules/{jobScheduleId}/disable", - jobScheduleId: string - ): JobScheduleDisable; - /** Resource for '/jobschedules/\{jobScheduleId\}/enable' has methods for the following verbs: post */ - ( - path: "/jobschedules/{jobScheduleId}/enable", - jobScheduleId: string - ): JobScheduleEnable; - /** Resource for '/jobschedules/\{jobScheduleId\}/terminate' has methods for the following verbs: post */ - ( - path: "/jobschedules/{jobScheduleId}/terminate", - jobScheduleId: string - ): JobScheduleTerminate; - /** Resource for '/jobschedules' has methods for the following verbs: post, get */ - (path: "/jobschedules"): JobScheduleAdd; - /** Resource for '/jobs/\{jobId\}/tasks' has methods for the following verbs: post, get */ - (path: "/jobs/{jobId}/tasks", jobId: string): TaskAdd; - /** 
Resource for '/jobs/\{jobId\}/addtaskcollection' has methods for the following verbs: post */ - (path: "/jobs/{jobId}/addtaskcollection", jobId: string): TaskAddCollection; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}' has methods for the following verbs: delete, get, put */ - ( - path: "/jobs/{jobId}/tasks/{taskId}", - jobId: string, - taskId: string - ): TaskDelete; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/subtasksinfo' has methods for the following verbs: get */ - ( - path: "/jobs/{jobId}/tasks/{taskId}/subtasksinfo", - jobId: string, - taskId: string - ): TaskListSubtasks; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/terminate' has methods for the following verbs: post */ - ( - path: "/jobs/{jobId}/tasks/{taskId}/terminate", - jobId: string, - taskId: string - ): TaskTerminate; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/reactivate' has methods for the following verbs: post */ - ( - path: "/jobs/{jobId}/tasks/{taskId}/reactivate", - jobId: string, - taskId: string - ): TaskReactivate; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/users' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/users", - poolId: string, - nodeId: string - ): ComputeNodeOperationsAddUser; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/users/\{userName\}' has methods for the following verbs: delete, put */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/users/{userName}", - poolId: string, - nodeId: string, - userName: string - ): ComputeNodeOperationsDeleteUser; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}' has methods for the following verbs: get */ - ( - path: "/pools/{poolId}/nodes/{nodeId}", - poolId: string, - nodeId: string - ): ComputeNodeOperationsGet; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/reboot' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/reboot", - poolId: string, - nodeId: string - ): ComputeNodeOperationsReboot; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/reimage' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/reimage", - poolId: string, - nodeId: string - ): ComputeNodeOperationsReimage; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/disablescheduling' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/disablescheduling", - poolId: string, - nodeId: string - ): ComputeNodeOperationsDisableScheduling; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/enablescheduling' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/enablescheduling", - poolId: string, - nodeId: string - ): ComputeNodeOperationsEnableScheduling; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/remoteloginsettings' has methods for the following verbs: get */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", - poolId: string, - nodeId: string - ): ComputeNodeOperationsGetRemoteLoginSettings; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/rdp' has methods for the following verbs: get */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/rdp", - poolId: string, - nodeId: string - ): ComputeNodeOperationsGetRemoteDesktop; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/uploadbatchservicelogs' has methods for the following verbs: post */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs", - poolId: string, - nodeId: string - ): ComputeNodeOperationsUploadBatchServiceLogs; - /** Resource for 
'/pools/\{poolId\}/nodes' has methods for the following verbs: get */ - (path: "/pools/{poolId}/nodes", poolId: string): ComputeNodeOperationsList; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/extensions/\{extensionName\}' has methods for the following verbs: get */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}", - poolId: string, - nodeId: string, - extensionName: string - ): ComputeNodeExtensionOperationsGet; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/extensions' has methods for the following verbs: get */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/extensions", - poolId: string, - nodeId: string - ): ComputeNodeExtensionOperationsList; -} - -export type BatchServiceClient = Client & { - path: Routes; -}; diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/index.ts deleted file mode 100644 index cad1ab7516..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/index.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import BatchService from "./batchService"; - -export * from "./batchService"; -export * from "./parameters"; -export * from "./responses"; -export * from "./clientDefinitions"; -export * from "./isUnexpected"; -export * from "./models"; -export * from "./outputModels"; -export * from "./paginateHelper"; - -export default BatchService; diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/isUnexpected.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/isUnexpected.ts deleted file mode 100644 index 778b555a36..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/isUnexpected.ts +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. 
- -import { - ApplicationOperationsList200Response, - ApplicationOperationsListDefaultResponse, - ApplicationOperationsGet200Response, - ApplicationOperationsGetDefaultResponse, - PoolListUsageMetrics200Response, - PoolListUsageMetricsDefaultResponse, - PoolGetAllLifetimeStatistics200Response, - PoolGetAllLifetimeStatisticsDefaultResponse, - PoolAdd201Response, - PoolAddDefaultResponse, - PoolList200Response, - PoolListDefaultResponse, - PoolDelete202Response, - PoolDeleteDefaultResponse, - PoolExists404Response, - PoolExistsDefaultResponse, - PoolGet200Response, - PoolGetDefaultResponse, - PoolPatch200Response, - PoolPatchDefaultResponse, - PoolDisableAutoScale200Response, - PoolDisableAutoScaleDefaultResponse, - PoolEnableAutoScale200Response, - PoolEnableAutoScaleDefaultResponse, - PoolEvaluateAutoScale200Response, - PoolEvaluateAutoScaleDefaultResponse, - PoolResize200Response, - PoolResizeDefaultResponse, - PoolStopResize200Response, - PoolStopResizeDefaultResponse, - PoolUpdateProperties200Response, - PoolUpdatePropertiesDefaultResponse, - PoolRemoveNodes200Response, - PoolRemoveNodesDefaultResponse, - AccountListSupportedImages200Response, - AccountListSupportedImagesDefaultResponse, - AccountListPoolNodeCounts200Response, - AccountListPoolNodeCountsDefaultResponse, - JobGetAllLifetimeStatistics200Response, - JobGetAllLifetimeStatisticsDefaultResponse, - JobDelete202Response, - JobDeleteDefaultResponse, - JobGet200Response, - JobGetDefaultResponse, - JobPatch200Response, - JobPatchDefaultResponse, - JobUpdate200Response, - JobUpdateDefaultResponse, - JobDisable202Response, - JobDisableDefaultResponse, - JobEnable202Response, - JobEnableDefaultResponse, - JobTerminate202Response, - JobTerminateDefaultResponse, - JobAdd201Response, - JobAddDefaultResponse, - JobList200Response, - JobListDefaultResponse, - JobListFromJobSchedule200Response, - JobListFromJobScheduleDefaultResponse, - JobListPreparationAndReleaseTaskStatus200Response, - JobListPreparationAndReleaseTaskStatusDefaultResponse, - JobGetTaskCounts200Response, - JobGetTaskCountsDefaultResponse, - CertificateOperationsAdd201Response, - CertificateOperationsAddDefaultResponse, - CertificateOperationsList200Response, - CertificateOperationsListDefaultResponse, - CertificateOperationsCancelDeletion204Response, - CertificateOperationsCancelDeletionDefaultResponse, - CertificateOperationsDelete202Response, - CertificateOperationsDeleteDefaultResponse, - CertificateOperationsGet200Response, - CertificateOperationsGetDefaultResponse, - FileDeleteFromTask200Response, - FileDeleteFromTaskDefaultResponse, - FileGetFromTask200Response, - FileGetFromTaskDefaultResponse, - FileGetPropertiesFromTask200Response, - FileGetPropertiesFromTaskDefaultResponse, - FileDeleteFromComputeNode200Response, - FileDeleteFromComputeNodeDefaultResponse, - FileGetFromComputeNode200Response, - FileGetFromComputeNodeDefaultResponse, - FileGetPropertiesFromComputeNode200Response, - FileGetPropertiesFromComputeNodeDefaultResponse, - FileListFromTask200Response, - FileListFromTaskDefaultResponse, - FileListFromComputeNode200Response, - FileListFromComputeNodeDefaultResponse, - JobScheduleExists200Response, - JobScheduleExists204Response, - JobScheduleExistsDefaultResponse, - JobScheduleDelete202Response, - JobScheduleDeleteDefaultResponse, - JobScheduleGet200Response, - JobScheduleGetDefaultResponse, - JobSchedulePatch200Response, - JobSchedulePatchDefaultResponse, - JobScheduleUpdate200Response, - JobScheduleUpdateDefaultResponse, - JobScheduleDisable204Response, 
- JobScheduleDisableDefaultResponse, - JobScheduleEnable204Response, - JobScheduleEnableDefaultResponse, - JobScheduleTerminate202Response, - JobScheduleTerminateDefaultResponse, - JobScheduleAdd201Response, - JobScheduleAddDefaultResponse, - JobScheduleList200Response, - JobScheduleListDefaultResponse, - TaskAdd201Response, - TaskAddDefaultResponse, - TaskList200Response, - TaskListDefaultResponse, - TaskAddCollection200Response, - TaskAddCollectionDefaultResponse, - TaskDelete200Response, - TaskDeleteDefaultResponse, - TaskGet200Response, - TaskGetDefaultResponse, - TaskUpdate200Response, - TaskUpdateDefaultResponse, - TaskListSubtasks200Response, - TaskListSubtasksDefaultResponse, - TaskTerminate204Response, - TaskTerminateDefaultResponse, - TaskReactivate204Response, - TaskReactivateDefaultResponse, - ComputeNodeOperationsAddUser201Response, - ComputeNodeOperationsAddUserDefaultResponse, - ComputeNodeOperationsDeleteUser200Response, - ComputeNodeOperationsDeleteUserDefaultResponse, - ComputeNodeOperationsUpdateUser200Response, - ComputeNodeOperationsUpdateUserDefaultResponse, - ComputeNodeOperationsGet200Response, - ComputeNodeOperationsGetDefaultResponse, - ComputeNodeOperationsReboot202Response, - ComputeNodeOperationsRebootDefaultResponse, - ComputeNodeOperationsReimage202Response, - ComputeNodeOperationsReimageDefaultResponse, - ComputeNodeOperationsDisableScheduling200Response, - ComputeNodeOperationsDisableSchedulingDefaultResponse, - ComputeNodeOperationsEnableScheduling200Response, - ComputeNodeOperationsEnableSchedulingDefaultResponse, - ComputeNodeOperationsGetRemoteLoginSettings200Response, - ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse, - ComputeNodeOperationsGetRemoteDesktop200Response, - ComputeNodeOperationsGetRemoteDesktopDefaultResponse, - ComputeNodeOperationsUploadBatchServiceLogs200Response, - ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse, - ComputeNodeOperationsList200Response, - ComputeNodeOperationsListDefaultResponse, - ComputeNodeExtensionOperationsGet200Response, - ComputeNodeExtensionOperationsGetDefaultResponse, - ComputeNodeExtensionOperationsList200Response, - ComputeNodeExtensionOperationsListDefaultResponse, -} from "./responses"; - -const responseMap: Record = { - "GET /applications": ["200"], - "GET /applications/{applicationId}": ["200"], - "GET /poolusagemetrics": ["200"], - "GET /lifetimepoolstats": ["200"], - "POST /pools": ["201"], - "GET /pools": ["200"], - "DELETE /pools/{poolId}": ["202"], - "HEAD /pools/{poolId}": ["404"], - "GET /pools/{poolId}": ["200"], - "PATCH /pools/{poolId}": ["200"], - "POST /pools/{poolId}/disableautoscale": ["200"], - "POST /pools/{poolId}/enableautoscale": ["200"], - "POST /pools/{poolId}/evaluateautoscale": ["200"], - "POST /pools/{poolId}/resize": ["200"], - "POST /pools/{poolId}/stopresize": ["200"], - "POST /pools/{poolId}/updateproperties": ["200"], - "POST /pools/{poolId}/removenodes": ["200"], - "GET /supportedimages": ["200"], - "GET /nodecounts": ["200"], - "GET /lifetimejobstats": ["200"], - "DELETE /jobs/{jobId}": ["202"], - "GET /jobs/{jobId}": ["200"], - "PATCH /jobs/{jobId}": ["200"], - "PUT /jobs/{jobId}": ["200"], - "POST /jobs/{jobId}/disable": ["202"], - "POST /jobs/{jobId}/enable": ["202"], - "POST /jobs/{jobId}/terminate": ["202"], - "POST /jobs": ["201"], - "GET /jobs": ["200"], - "GET /jobschedules/{jobScheduleId}/jobs": ["200"], - "GET /jobs/{jobId}/jobpreparationandreleasetaskstatus": ["200"], - "GET /jobs/{jobId}/taskcounts": ["200"], - "POST /certificates": 
["201"], - "GET /certificates": ["200"], - "POST /certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete": - ["204"], - "DELETE /certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})": - ["202"], - "GET /certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})": - ["200"], - "DELETE /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"], - "GET /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"], - "HEAD /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"], - "DELETE /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"], - "GET /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"], - "HEAD /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"], - "GET /jobs/{jobId}/tasks/{taskId}/files": ["200"], - "GET /pools/{poolId}/nodes/{nodeId}/files": ["200"], - "HEAD /jobschedules/{jobScheduleId}": ["200", "204"], - "DELETE /jobschedules/{jobScheduleId}": ["202"], - "GET /jobschedules/{jobScheduleId}": ["200"], - "PATCH /jobschedules/{jobScheduleId}": ["200"], - "PUT /jobschedules/{jobScheduleId}": ["200"], - "POST /jobschedules/{jobScheduleId}/disable": ["204"], - "POST /jobschedules/{jobScheduleId}/enable": ["204"], - "POST /jobschedules/{jobScheduleId}/terminate": ["202"], - "POST /jobschedules": ["201"], - "GET /jobschedules": ["200"], - "POST /jobs/{jobId}/tasks": ["201"], - "GET /jobs/{jobId}/tasks": ["200"], - "POST /jobs/{jobId}/addtaskcollection": ["200"], - "DELETE /jobs/{jobId}/tasks/{taskId}": ["200"], - "GET /jobs/{jobId}/tasks/{taskId}": ["200"], - "PUT /jobs/{jobId}/tasks/{taskId}": ["200"], - "GET /jobs/{jobId}/tasks/{taskId}/subtasksinfo": ["200"], - "POST /jobs/{jobId}/tasks/{taskId}/terminate": ["204"], - "POST /jobs/{jobId}/tasks/{taskId}/reactivate": ["204"], - "POST /pools/{poolId}/nodes/{nodeId}/users": ["201"], - "DELETE /pools/{poolId}/nodes/{nodeId}/users/{userName}": ["200"], - "PUT /pools/{poolId}/nodes/{nodeId}/users/{userName}": ["200"], - "GET /pools/{poolId}/nodes/{nodeId}": ["200"], - "POST /pools/{poolId}/nodes/{nodeId}/reboot": ["202"], - "POST /pools/{poolId}/nodes/{nodeId}/reimage": ["202"], - "POST /pools/{poolId}/nodes/{nodeId}/disablescheduling": ["200"], - "POST /pools/{poolId}/nodes/{nodeId}/enablescheduling": ["200"], - "GET /pools/{poolId}/nodes/{nodeId}/remoteloginsettings": ["200"], - "GET /pools/{poolId}/nodes/{nodeId}/rdp": ["200"], - "POST /pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs": ["200"], - "GET /pools/{poolId}/nodes": ["200"], - "GET /pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}": ["200"], - "GET /pools/{poolId}/nodes/{nodeId}/extensions": ["200"], -}; - -export function isUnexpected( - response: - | ApplicationOperationsList200Response - | ApplicationOperationsListDefaultResponse -): response is ApplicationOperationsListDefaultResponse; -export function isUnexpected( - response: - | ApplicationOperationsGet200Response - | ApplicationOperationsGetDefaultResponse -): response is ApplicationOperationsGetDefaultResponse; -export function isUnexpected( - response: - | PoolListUsageMetrics200Response - | PoolListUsageMetricsDefaultResponse -): response is PoolListUsageMetricsDefaultResponse; -export function isUnexpected( - response: - | PoolGetAllLifetimeStatistics200Response - | PoolGetAllLifetimeStatisticsDefaultResponse -): response is PoolGetAllLifetimeStatisticsDefaultResponse; -export function isUnexpected( - response: PoolAdd201Response | PoolAddDefaultResponse -): response is PoolAddDefaultResponse; -export 
function isUnexpected( - response: PoolList200Response | PoolListDefaultResponse -): response is PoolListDefaultResponse; -export function isUnexpected( - response: PoolDelete202Response | PoolDeleteDefaultResponse -): response is PoolDeleteDefaultResponse; -export function isUnexpected( - response: PoolExists404Response | PoolExistsDefaultResponse -): response is PoolExistsDefaultResponse; -export function isUnexpected( - response: PoolGet200Response | PoolGetDefaultResponse -): response is PoolGetDefaultResponse; -export function isUnexpected( - response: PoolPatch200Response | PoolPatchDefaultResponse -): response is PoolPatchDefaultResponse; -export function isUnexpected( - response: - | PoolDisableAutoScale200Response - | PoolDisableAutoScaleDefaultResponse -): response is PoolDisableAutoScaleDefaultResponse; -export function isUnexpected( - response: PoolEnableAutoScale200Response | PoolEnableAutoScaleDefaultResponse -): response is PoolEnableAutoScaleDefaultResponse; -export function isUnexpected( - response: - | PoolEvaluateAutoScale200Response - | PoolEvaluateAutoScaleDefaultResponse -): response is PoolEvaluateAutoScaleDefaultResponse; -export function isUnexpected( - response: PoolResize200Response | PoolResizeDefaultResponse -): response is PoolResizeDefaultResponse; -export function isUnexpected( - response: PoolStopResize200Response | PoolStopResizeDefaultResponse -): response is PoolStopResizeDefaultResponse; -export function isUnexpected( - response: - | PoolUpdateProperties200Response - | PoolUpdatePropertiesDefaultResponse -): response is PoolUpdatePropertiesDefaultResponse; -export function isUnexpected( - response: PoolRemoveNodes200Response | PoolRemoveNodesDefaultResponse -): response is PoolRemoveNodesDefaultResponse; -export function isUnexpected( - response: - | AccountListSupportedImages200Response - | AccountListSupportedImagesDefaultResponse -): response is AccountListSupportedImagesDefaultResponse; -export function isUnexpected( - response: - | AccountListPoolNodeCounts200Response - | AccountListPoolNodeCountsDefaultResponse -): response is AccountListPoolNodeCountsDefaultResponse; -export function isUnexpected( - response: - | JobGetAllLifetimeStatistics200Response - | JobGetAllLifetimeStatisticsDefaultResponse -): response is JobGetAllLifetimeStatisticsDefaultResponse; -export function isUnexpected( - response: JobDelete202Response | JobDeleteDefaultResponse -): response is JobDeleteDefaultResponse; -export function isUnexpected( - response: JobGet200Response | JobGetDefaultResponse -): response is JobGetDefaultResponse; -export function isUnexpected( - response: JobPatch200Response | JobPatchDefaultResponse -): response is JobPatchDefaultResponse; -export function isUnexpected( - response: JobUpdate200Response | JobUpdateDefaultResponse -): response is JobUpdateDefaultResponse; -export function isUnexpected( - response: JobDisable202Response | JobDisableDefaultResponse -): response is JobDisableDefaultResponse; -export function isUnexpected( - response: JobEnable202Response | JobEnableDefaultResponse -): response is JobEnableDefaultResponse; -export function isUnexpected( - response: JobTerminate202Response | JobTerminateDefaultResponse -): response is JobTerminateDefaultResponse; -export function isUnexpected( - response: JobAdd201Response | JobAddDefaultResponse -): response is JobAddDefaultResponse; -export function isUnexpected( - response: JobList200Response | JobListDefaultResponse -): response is JobListDefaultResponse; -export function isUnexpected( 
- response: - | JobListFromJobSchedule200Response - | JobListFromJobScheduleDefaultResponse -): response is JobListFromJobScheduleDefaultResponse; -export function isUnexpected( - response: - | JobListPreparationAndReleaseTaskStatus200Response - | JobListPreparationAndReleaseTaskStatusDefaultResponse -): response is JobListPreparationAndReleaseTaskStatusDefaultResponse; -export function isUnexpected( - response: JobGetTaskCounts200Response | JobGetTaskCountsDefaultResponse -): response is JobGetTaskCountsDefaultResponse; -export function isUnexpected( - response: - | CertificateOperationsAdd201Response - | CertificateOperationsAddDefaultResponse -): response is CertificateOperationsAddDefaultResponse; -export function isUnexpected( - response: - | CertificateOperationsList200Response - | CertificateOperationsListDefaultResponse -): response is CertificateOperationsListDefaultResponse; -export function isUnexpected( - response: - | CertificateOperationsCancelDeletion204Response - | CertificateOperationsCancelDeletionDefaultResponse -): response is CertificateOperationsCancelDeletionDefaultResponse; -export function isUnexpected( - response: - | CertificateOperationsDelete202Response - | CertificateOperationsDeleteDefaultResponse -): response is CertificateOperationsDeleteDefaultResponse; -export function isUnexpected( - response: - | CertificateOperationsGet200Response - | CertificateOperationsGetDefaultResponse -): response is CertificateOperationsGetDefaultResponse; -export function isUnexpected( - response: FileDeleteFromTask200Response | FileDeleteFromTaskDefaultResponse -): response is FileDeleteFromTaskDefaultResponse; -export function isUnexpected( - response: FileGetFromTask200Response | FileGetFromTaskDefaultResponse -): response is FileGetFromTaskDefaultResponse; -export function isUnexpected( - response: - | FileGetPropertiesFromTask200Response - | FileGetPropertiesFromTaskDefaultResponse -): response is FileGetPropertiesFromTaskDefaultResponse; -export function isUnexpected( - response: - | FileDeleteFromComputeNode200Response - | FileDeleteFromComputeNodeDefaultResponse -): response is FileDeleteFromComputeNodeDefaultResponse; -export function isUnexpected( - response: - | FileGetFromComputeNode200Response - | FileGetFromComputeNodeDefaultResponse -): response is FileGetFromComputeNodeDefaultResponse; -export function isUnexpected( - response: - | FileGetPropertiesFromComputeNode200Response - | FileGetPropertiesFromComputeNodeDefaultResponse -): response is FileGetPropertiesFromComputeNodeDefaultResponse; -export function isUnexpected( - response: FileListFromTask200Response | FileListFromTaskDefaultResponse -): response is FileListFromTaskDefaultResponse; -export function isUnexpected( - response: - | FileListFromComputeNode200Response - | FileListFromComputeNodeDefaultResponse -): response is FileListFromComputeNodeDefaultResponse; -export function isUnexpected( - response: - | JobScheduleExists200Response - | JobScheduleExists204Response - | JobScheduleExistsDefaultResponse -): response is JobScheduleExistsDefaultResponse; -export function isUnexpected( - response: JobScheduleDelete202Response | JobScheduleDeleteDefaultResponse -): response is JobScheduleDeleteDefaultResponse; -export function isUnexpected( - response: JobScheduleGet200Response | JobScheduleGetDefaultResponse -): response is JobScheduleGetDefaultResponse; -export function isUnexpected( - response: JobSchedulePatch200Response | JobSchedulePatchDefaultResponse -): response is JobSchedulePatchDefaultResponse; 
-export function isUnexpected( - response: JobScheduleUpdate200Response | JobScheduleUpdateDefaultResponse -): response is JobScheduleUpdateDefaultResponse; -export function isUnexpected( - response: JobScheduleDisable204Response | JobScheduleDisableDefaultResponse -): response is JobScheduleDisableDefaultResponse; -export function isUnexpected( - response: JobScheduleEnable204Response | JobScheduleEnableDefaultResponse -): response is JobScheduleEnableDefaultResponse; -export function isUnexpected( - response: - | JobScheduleTerminate202Response - | JobScheduleTerminateDefaultResponse -): response is JobScheduleTerminateDefaultResponse; -export function isUnexpected( - response: JobScheduleAdd201Response | JobScheduleAddDefaultResponse -): response is JobScheduleAddDefaultResponse; -export function isUnexpected( - response: JobScheduleList200Response | JobScheduleListDefaultResponse -): response is JobScheduleListDefaultResponse; -export function isUnexpected( - response: TaskAdd201Response | TaskAddDefaultResponse -): response is TaskAddDefaultResponse; -export function isUnexpected( - response: TaskList200Response | TaskListDefaultResponse -): response is TaskListDefaultResponse; -export function isUnexpected( - response: TaskAddCollection200Response | TaskAddCollectionDefaultResponse -): response is TaskAddCollectionDefaultResponse; -export function isUnexpected( - response: TaskDelete200Response | TaskDeleteDefaultResponse -): response is TaskDeleteDefaultResponse; -export function isUnexpected( - response: TaskGet200Response | TaskGetDefaultResponse -): response is TaskGetDefaultResponse; -export function isUnexpected( - response: TaskUpdate200Response | TaskUpdateDefaultResponse -): response is TaskUpdateDefaultResponse; -export function isUnexpected( - response: TaskListSubtasks200Response | TaskListSubtasksDefaultResponse -): response is TaskListSubtasksDefaultResponse; -export function isUnexpected( - response: TaskTerminate204Response | TaskTerminateDefaultResponse -): response is TaskTerminateDefaultResponse; -export function isUnexpected( - response: TaskReactivate204Response | TaskReactivateDefaultResponse -): response is TaskReactivateDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsAddUser201Response - | ComputeNodeOperationsAddUserDefaultResponse -): response is ComputeNodeOperationsAddUserDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsDeleteUser200Response - | ComputeNodeOperationsDeleteUserDefaultResponse -): response is ComputeNodeOperationsDeleteUserDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsUpdateUser200Response - | ComputeNodeOperationsUpdateUserDefaultResponse -): response is ComputeNodeOperationsUpdateUserDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsGet200Response - | ComputeNodeOperationsGetDefaultResponse -): response is ComputeNodeOperationsGetDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsReboot202Response - | ComputeNodeOperationsRebootDefaultResponse -): response is ComputeNodeOperationsRebootDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsReimage202Response - | ComputeNodeOperationsReimageDefaultResponse -): response is ComputeNodeOperationsReimageDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsDisableScheduling200Response - | ComputeNodeOperationsDisableSchedulingDefaultResponse -): response is 
ComputeNodeOperationsDisableSchedulingDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsEnableScheduling200Response - | ComputeNodeOperationsEnableSchedulingDefaultResponse -): response is ComputeNodeOperationsEnableSchedulingDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsGetRemoteLoginSettings200Response - | ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse -): response is ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsGetRemoteDesktop200Response - | ComputeNodeOperationsGetRemoteDesktopDefaultResponse -): response is ComputeNodeOperationsGetRemoteDesktopDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsUploadBatchServiceLogs200Response - | ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse -): response is ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeOperationsList200Response - | ComputeNodeOperationsListDefaultResponse -): response is ComputeNodeOperationsListDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeExtensionOperationsGet200Response - | ComputeNodeExtensionOperationsGetDefaultResponse -): response is ComputeNodeExtensionOperationsGetDefaultResponse; -export function isUnexpected( - response: - | ComputeNodeExtensionOperationsList200Response - | ComputeNodeExtensionOperationsListDefaultResponse -): response is ComputeNodeExtensionOperationsListDefaultResponse; -export function isUnexpected( - response: - | ApplicationOperationsList200Response - | ApplicationOperationsListDefaultResponse - | ApplicationOperationsGet200Response - | ApplicationOperationsGetDefaultResponse - | PoolListUsageMetrics200Response - | PoolListUsageMetricsDefaultResponse - | PoolGetAllLifetimeStatistics200Response - | PoolGetAllLifetimeStatisticsDefaultResponse - | PoolAdd201Response - | PoolAddDefaultResponse - | PoolList200Response - | PoolListDefaultResponse - | PoolDelete202Response - | PoolDeleteDefaultResponse - | PoolExists404Response - | PoolExistsDefaultResponse - | PoolGet200Response - | PoolGetDefaultResponse - | PoolPatch200Response - | PoolPatchDefaultResponse - | PoolDisableAutoScale200Response - | PoolDisableAutoScaleDefaultResponse - | PoolEnableAutoScale200Response - | PoolEnableAutoScaleDefaultResponse - | PoolEvaluateAutoScale200Response - | PoolEvaluateAutoScaleDefaultResponse - | PoolResize200Response - | PoolResizeDefaultResponse - | PoolStopResize200Response - | PoolStopResizeDefaultResponse - | PoolUpdateProperties200Response - | PoolUpdatePropertiesDefaultResponse - | PoolRemoveNodes200Response - | PoolRemoveNodesDefaultResponse - | AccountListSupportedImages200Response - | AccountListSupportedImagesDefaultResponse - | AccountListPoolNodeCounts200Response - | AccountListPoolNodeCountsDefaultResponse - | JobGetAllLifetimeStatistics200Response - | JobGetAllLifetimeStatisticsDefaultResponse - | JobDelete202Response - | JobDeleteDefaultResponse - | JobGet200Response - | JobGetDefaultResponse - | JobPatch200Response - | JobPatchDefaultResponse - | JobUpdate200Response - | JobUpdateDefaultResponse - | JobDisable202Response - | JobDisableDefaultResponse - | JobEnable202Response - | JobEnableDefaultResponse - | JobTerminate202Response - | JobTerminateDefaultResponse - | JobAdd201Response - | JobAddDefaultResponse - | JobList200Response - | JobListDefaultResponse - | 
JobListFromJobSchedule200Response - | JobListFromJobScheduleDefaultResponse - | JobListPreparationAndReleaseTaskStatus200Response - | JobListPreparationAndReleaseTaskStatusDefaultResponse - | JobGetTaskCounts200Response - | JobGetTaskCountsDefaultResponse - | CertificateOperationsAdd201Response - | CertificateOperationsAddDefaultResponse - | CertificateOperationsList200Response - | CertificateOperationsListDefaultResponse - | CertificateOperationsCancelDeletion204Response - | CertificateOperationsCancelDeletionDefaultResponse - | CertificateOperationsDelete202Response - | CertificateOperationsDeleteDefaultResponse - | CertificateOperationsGet200Response - | CertificateOperationsGetDefaultResponse - | FileDeleteFromTask200Response - | FileDeleteFromTaskDefaultResponse - | FileGetFromTask200Response - | FileGetFromTaskDefaultResponse - | FileGetPropertiesFromTask200Response - | FileGetPropertiesFromTaskDefaultResponse - | FileDeleteFromComputeNode200Response - | FileDeleteFromComputeNodeDefaultResponse - | FileGetFromComputeNode200Response - | FileGetFromComputeNodeDefaultResponse - | FileGetPropertiesFromComputeNode200Response - | FileGetPropertiesFromComputeNodeDefaultResponse - | FileListFromTask200Response - | FileListFromTaskDefaultResponse - | FileListFromComputeNode200Response - | FileListFromComputeNodeDefaultResponse - | JobScheduleExists200Response - | JobScheduleExists204Response - | JobScheduleExistsDefaultResponse - | JobScheduleDelete202Response - | JobScheduleDeleteDefaultResponse - | JobScheduleGet200Response - | JobScheduleGetDefaultResponse - | JobSchedulePatch200Response - | JobSchedulePatchDefaultResponse - | JobScheduleUpdate200Response - | JobScheduleUpdateDefaultResponse - | JobScheduleDisable204Response - | JobScheduleDisableDefaultResponse - | JobScheduleEnable204Response - | JobScheduleEnableDefaultResponse - | JobScheduleTerminate202Response - | JobScheduleTerminateDefaultResponse - | JobScheduleAdd201Response - | JobScheduleAddDefaultResponse - | JobScheduleList200Response - | JobScheduleListDefaultResponse - | TaskAdd201Response - | TaskAddDefaultResponse - | TaskList200Response - | TaskListDefaultResponse - | TaskAddCollection200Response - | TaskAddCollectionDefaultResponse - | TaskDelete200Response - | TaskDeleteDefaultResponse - | TaskGet200Response - | TaskGetDefaultResponse - | TaskUpdate200Response - | TaskUpdateDefaultResponse - | TaskListSubtasks200Response - | TaskListSubtasksDefaultResponse - | TaskTerminate204Response - | TaskTerminateDefaultResponse - | TaskReactivate204Response - | TaskReactivateDefaultResponse - | ComputeNodeOperationsAddUser201Response - | ComputeNodeOperationsAddUserDefaultResponse - | ComputeNodeOperationsDeleteUser200Response - | ComputeNodeOperationsDeleteUserDefaultResponse - | ComputeNodeOperationsUpdateUser200Response - | ComputeNodeOperationsUpdateUserDefaultResponse - | ComputeNodeOperationsGet200Response - | ComputeNodeOperationsGetDefaultResponse - | ComputeNodeOperationsReboot202Response - | ComputeNodeOperationsRebootDefaultResponse - | ComputeNodeOperationsReimage202Response - | ComputeNodeOperationsReimageDefaultResponse - | ComputeNodeOperationsDisableScheduling200Response - | ComputeNodeOperationsDisableSchedulingDefaultResponse - | ComputeNodeOperationsEnableScheduling200Response - | ComputeNodeOperationsEnableSchedulingDefaultResponse - | ComputeNodeOperationsGetRemoteLoginSettings200Response - | ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse - | ComputeNodeOperationsGetRemoteDesktop200Response - | 
ComputeNodeOperationsGetRemoteDesktopDefaultResponse - | ComputeNodeOperationsUploadBatchServiceLogs200Response - | ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse - | ComputeNodeOperationsList200Response - | ComputeNodeOperationsListDefaultResponse - | ComputeNodeExtensionOperationsGet200Response - | ComputeNodeExtensionOperationsGetDefaultResponse - | ComputeNodeExtensionOperationsList200Response - | ComputeNodeExtensionOperationsListDefaultResponse -): response is - | ApplicationOperationsListDefaultResponse - | ApplicationOperationsGetDefaultResponse - | PoolListUsageMetricsDefaultResponse - | PoolGetAllLifetimeStatisticsDefaultResponse - | PoolAddDefaultResponse - | PoolListDefaultResponse - | PoolDeleteDefaultResponse - | PoolExistsDefaultResponse - | PoolGetDefaultResponse - | PoolPatchDefaultResponse - | PoolDisableAutoScaleDefaultResponse - | PoolEnableAutoScaleDefaultResponse - | PoolEvaluateAutoScaleDefaultResponse - | PoolResizeDefaultResponse - | PoolStopResizeDefaultResponse - | PoolUpdatePropertiesDefaultResponse - | PoolRemoveNodesDefaultResponse - | AccountListSupportedImagesDefaultResponse - | AccountListPoolNodeCountsDefaultResponse - | JobGetAllLifetimeStatisticsDefaultResponse - | JobDeleteDefaultResponse - | JobGetDefaultResponse - | JobPatchDefaultResponse - | JobUpdateDefaultResponse - | JobDisableDefaultResponse - | JobEnableDefaultResponse - | JobTerminateDefaultResponse - | JobAddDefaultResponse - | JobListDefaultResponse - | JobListFromJobScheduleDefaultResponse - | JobListPreparationAndReleaseTaskStatusDefaultResponse - | JobGetTaskCountsDefaultResponse - | CertificateOperationsAddDefaultResponse - | CertificateOperationsListDefaultResponse - | CertificateOperationsCancelDeletionDefaultResponse - | CertificateOperationsDeleteDefaultResponse - | CertificateOperationsGetDefaultResponse - | FileDeleteFromTaskDefaultResponse - | FileGetFromTaskDefaultResponse - | FileGetPropertiesFromTaskDefaultResponse - | FileDeleteFromComputeNodeDefaultResponse - | FileGetFromComputeNodeDefaultResponse - | FileGetPropertiesFromComputeNodeDefaultResponse - | FileListFromTaskDefaultResponse - | FileListFromComputeNodeDefaultResponse - | JobScheduleExistsDefaultResponse - | JobScheduleDeleteDefaultResponse - | JobScheduleGetDefaultResponse - | JobSchedulePatchDefaultResponse - | JobScheduleUpdateDefaultResponse - | JobScheduleDisableDefaultResponse - | JobScheduleEnableDefaultResponse - | JobScheduleTerminateDefaultResponse - | JobScheduleAddDefaultResponse - | JobScheduleListDefaultResponse - | TaskAddDefaultResponse - | TaskListDefaultResponse - | TaskAddCollectionDefaultResponse - | TaskDeleteDefaultResponse - | TaskGetDefaultResponse - | TaskUpdateDefaultResponse - | TaskListSubtasksDefaultResponse - | TaskTerminateDefaultResponse - | TaskReactivateDefaultResponse - | ComputeNodeOperationsAddUserDefaultResponse - | ComputeNodeOperationsDeleteUserDefaultResponse - | ComputeNodeOperationsUpdateUserDefaultResponse - | ComputeNodeOperationsGetDefaultResponse - | ComputeNodeOperationsRebootDefaultResponse - | ComputeNodeOperationsReimageDefaultResponse - | ComputeNodeOperationsDisableSchedulingDefaultResponse - | ComputeNodeOperationsEnableSchedulingDefaultResponse - | ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse - | ComputeNodeOperationsGetRemoteDesktopDefaultResponse - | ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse - | ComputeNodeOperationsListDefaultResponse - | ComputeNodeExtensionOperationsGetDefaultResponse - | 
ComputeNodeExtensionOperationsListDefaultResponse { - const lroOriginal = response.headers["x-ms-original-url"]; - const url = new URL(lroOriginal ?? response.request.url); - const method = response.request.method; - let pathDetails = responseMap[`${method} ${url.pathname}`]; - if (!pathDetails) { - pathDetails = getParametrizedPathSuccess(method, url.pathname); - } - return !pathDetails.includes(response.status); -} - -function getParametrizedPathSuccess(method: string, path: string): string[] { - const pathParts = path.split("/"); - - // Traverse list to match the longest candidate - // matchedLen: the length of candidate path - // matchedValue: the matched status code array - let matchedLen = -1, - matchedValue: string[] = []; - - // Iterate the responseMap to find a match - for (const [key, value] of Object.entries(responseMap)) { - // Extracting the path from the map key which is in format - // GET /path/foo - if (!key.startsWith(method)) { - continue; - } - const candidatePath = getPathFromMapKey(key); - // Get each part of the url path - const candidateParts = candidatePath.split("/"); - - // track if we have found a match to return the values found. - let found = true; - for ( - let i = candidateParts.length - 1, j = pathParts.length - 1; - i >= 1 && j >= 1; - i--, j-- - ) { - if ( - candidateParts[i]?.startsWith("{") && - candidateParts[i]?.indexOf("}") !== -1 - ) { - const start = candidateParts[i]!.indexOf("}") + 1, - end = candidateParts[i]?.length; - // If the current part of the candidate is a "template" part - // Try to use the suffix of pattern to match the path - // {guid} ==> $ - // {guid}:export ==> :export$ - const isMatched = new RegExp( - `${candidateParts[i]?.slice(start, end)}` - ).test(pathParts[j] || ""); - - if (!isMatched) { - found = false; - break; - } - continue; - } - - // If the candidate part is not a template and - // the parts don't match mark the candidate as not found - // to move on with the next candidate path. - if (candidateParts[i] !== pathParts[j]) { - found = false; - break; - } - } - - // We finished evaluating the current candidate parts - // Update the matched value if and only if we found the longer pattern - if (found && candidatePath.length > matchedLen) { - matchedLen = candidatePath.length; - matchedValue = value; - } - } - - return matchedValue; -} - -function getPathFromMapKey(mapKey: string): string { - const pathStart = mapKey.indexOf("/"); - return mapKey.slice(pathStart); -} diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/models.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/models.ts deleted file mode 100644 index 0825de8eae..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/models.ts +++ /dev/null @@ -1,2991 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -/** Contains utilization and resource usage statistics for the lifetime of a Pool. */ -export interface PoolStatistics { - /** The start time of the time range covered by the statistics. */ - startTime: Date | string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: Date | string; - /** Statistics related to Pool usage information. */ - usageStats?: UsageStatistics; - /** Statistics related to resource consumption by Compute Nodes in a Pool. */ - resourceStats?: ResourceStatistics; -} - -/** Statistics related to Pool usage information. 
*/ -export interface UsageStatistics { - /** The start time of the time range covered by the statistics. */ - startTime: Date | string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: Date | string; - /** - * The aggregated wall-clock time of the dedicated Compute Node cores being part - * of the Pool. - */ - dedicatedCoreTime: string; -} - -/** Statistics related to resource consumption by Compute Nodes in a Pool. */ -export interface ResourceStatistics { - /** The start time of the time range covered by the statistics. */ - startTime: Date | string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: Date | string; - /** - * The average CPU usage across all Compute Nodes in the Pool (percentage per - * node). - */ - avgCPUPercentage: number; - /** The average memory usage in GiB across all Compute Nodes in the Pool. */ - avgMemoryGiB: number; - /** The peak memory usage in GiB across all Compute Nodes in the Pool. */ - peakMemoryGiB: number; - /** The average used disk space in GiB across all Compute Nodes in the Pool. */ - avgDiskGiB: number; - /** The peak used disk space in GiB across all Compute Nodes in the Pool. */ - peakDiskGiB: number; - /** The total number of disk read operations across all Compute Nodes in the Pool. */ - diskReadIOps: number; - /** The total number of disk write operations across all Compute Nodes in the Pool. */ - diskWriteIOps: number; - /** - * The total amount of data in GiB of disk reads across all Compute Nodes in the - * Pool. - */ - diskReadGiB: number; - /** - * The total amount of data in GiB of disk writes across all Compute Nodes in the - * Pool. - */ - diskWriteGiB: number; - /** - * The total amount of data in GiB of network reads across all Compute Nodes in - * the Pool. - */ - networkReadGiB: number; - /** - * The total amount of data in GiB of network writes across all Compute Nodes in - * the Pool. - */ - networkWriteGiB: number; -} - -/** A Pool in the Azure Batch service. */ -export interface BatchPool { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores, and cannot contain more than 64 characters. The ID is - * case-preserving and case-insensitive (that is, you may not have two IDs within - * an Account that differ only by case). - */ - id?: string; - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ - displayName?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - */ - vmSize?: string; - /** - * This property and virtualMachineConfiguration are mutually exclusive and one of - * the properties must be specified. This property cannot be specified if the - * Batch Account was created with its poolAllocationMode property set to - * 'UserSubscription'. - */ - cloudServiceConfiguration?: CloudServiceConfiguration; - /** - * This property and cloudServiceConfiguration are mutually exclusive and one of - * the properties must be specified. - */ - virtualMachineConfiguration?: VirtualMachineConfiguration; - /** - * This is the timeout for the most recent resize operation. 
(The initial sizing - * when the Pool is created counts as a resize.) The default value is 15 minutes. - */ - resizeTimeout?: string; - /** The desired number of dedicated Compute Nodes in the Pool. */ - targetDedicatedNodes?: number; - /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ - targetLowPriorityNodes?: number; - /** - * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must - * be specified. If true, the autoScaleFormula property is required and the Pool - * automatically resizes according to the formula. The default value is false. - */ - enableAutoScale?: boolean; - /** - * This property is set only if the Pool automatically scales, i.e. - * enableAutoScale is true. - */ - autoScaleFormula?: string; - /** - * This property is set only if the Pool automatically scales, i.e. - * enableAutoScale is true. - */ - autoScaleEvaluationInterval?: string; - /** - * This imposes restrictions on which Compute Nodes can be assigned to the Pool. - * Specifying this value can reduce the chance of the requested number of Compute - * Nodes to be allocated in the Pool. - */ - enableInterNodeCommunication?: boolean; - /** The network configuration for a Pool. */ - networkConfiguration?: NetworkConfiguration; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ - startTask?: StartTask; - /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. - */ - certificateReferences?: Array; - /** - * Changes to Package references affect all new Nodes joining the Pool, but do not - * affect Compute Nodes that are already in the Pool until they are rebooted or - * reimaged. There is a maximum of 10 Package references on any given Pool. - */ - applicationPackageReferences?: Array; - /** - * The list of application licenses must be a subset of available Batch service - * application licenses. If a license is requested which is not supported, Pool - * creation will fail. - */ - applicationLicenses?: string[]; - /** - * The default value is 1. 
The maximum value is the smaller of 4 times the number - * of cores of the vmSize of the pool or 256. - */ - taskSlotsPerNode?: number; - /** If not specified, the default is spread. */ - taskSchedulingPolicy?: TaskSchedulingPolicy; - /** The list of user Accounts to be created on each Compute Node in the Pool. */ - userAccounts?: Array; - /** A list of name-value pairs associated with the Pool as metadata. */ - metadata?: Array; - /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ - mountConfiguration?: Array; - /** - * If omitted, the default value is Default. - * - * Possible values: default, classic, simplified - */ - targetNodeCommunicationMode?: string; -} - -/** - * The configuration for Compute Nodes in a Pool based on the Azure Cloud Services - * platform. - */ -export interface CloudServiceConfiguration { - /** - * Possible values are: - * 2 - OS Family 2, equivalent to Windows Server 2008 R2 - * SP1. - * 3 - OS Family 3, equivalent to Windows Server 2012. - * 4 - OS Family 4, - * equivalent to Windows Server 2012 R2. - * 5 - OS Family 5, equivalent to Windows - * Server 2016. - * 6 - OS Family 6, equivalent to Windows Server 2019. For more - * information, see Azure Guest OS Releases - * (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). - */ - osFamily: string; - /** - * The default value is * which specifies the latest operating system version for - * the specified OS family. - */ - osVersion?: string; -} - -/** - * The configuration for Compute Nodes in a Pool based on the Azure Virtual - * Machines infrastructure. - */ -export interface VirtualMachineConfiguration { - /** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ - imageReference: ImageReference; - /** - * The Batch Compute Node agent is a program that runs on each Compute Node in the - * Pool, and provides the command-and-control interface between the Compute Node - * and the Batch service. There are different implementations of the Compute Node - * agent, known as SKUs, for different operating systems. You must specify a - * Compute Node agent SKU which matches the selected Image reference. To get the - * list of supported Compute Node agent SKUs along with their list of verified - * Image references, see the 'List supported Compute Node agent SKUs' operation. - */ - nodeAgentSKUId: string; - /** - * This property must not be specified if the imageReference property specifies a - * Linux OS Image. - */ - windowsConfiguration?: WindowsConfiguration; - /** - * This property must be specified if the Compute Nodes in the Pool need to have - * empty data disks attached to them. This cannot be updated. Each Compute Node - * gets its own disk (the disk is not a file share). Existing disks cannot be - * attached, each attached disk is empty. When the Compute Node is removed from - * the Pool, the disk and all data associated with it is also deleted. The disk is - * not formatted after being attached, it must be formatted before use - for more - * information see - * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux - * and - * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. 
- */ - dataDisks?: Array; - /** - * This only applies to Images that contain the Windows operating system, and - * should only be used when you hold valid on-premises licenses for the Compute - * Nodes which will be deployed. If omitted, no on-premises licensing discount is - * applied. Values are: - * - * Windows_Server - The on-premises license is for Windows - * Server. - * Windows_Client - The on-premises license is for Windows Client. - * - */ - licenseType?: string; - /** - * If specified, setup is performed on each Compute Node in the Pool to allow - * Tasks to run in containers. All regular Tasks and Job manager Tasks run on this - * Pool must specify the containerSettings property, and all other Tasks may - * specify it. - */ - containerConfiguration?: ContainerConfiguration; - /** - * If specified, encryption is performed on each node in the pool during node - * provisioning. - */ - diskEncryptionConfiguration?: DiskEncryptionConfiguration; - /** - * This configuration will specify rules on how nodes in the pool will be - * physically allocated. - */ - nodePlacementConfiguration?: NodePlacementConfiguration; - /** - * If specified, the extensions mentioned in this configuration will be installed - * on each node. - */ - extensions?: Array; - /** Settings for the operating system disk of the compute node (VM). */ - osDisk?: OSDisk; -} - -/** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ -export interface ImageReference { - /** For example, Canonical or MicrosoftWindowsServer. */ - publisher?: string; - /** For example, UbuntuServer or WindowsServer. */ - offer?: string; - /** For example, 18.04-LTS or 2019-Datacenter. */ - sku?: string; - /** - * A value of 'latest' can be specified to select the latest version of an Image. - * If omitted, the default is 'latest'. - */ - version?: string; - /** - * This property is mutually exclusive with other ImageReference properties. The - * Shared Image Gallery Image must have replicas in the same region and must be in - * the same subscription as the Azure Batch account. If the image version is not - * specified in the imageId, the latest version will be used. For information - * about the firewall settings for the Batch Compute Node agent to communicate - * with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - */ - virtualMachineImageId?: string; -} - -/** Windows operating system settings to apply to the virtual machine. */ -export interface WindowsConfiguration { - /** If omitted, the default value is true. */ - enableAutomaticUpdates?: boolean; -} - -/** - * Settings which will be used by the data disks associated to Compute Nodes in - * the Pool. When using attached data disks, you need to mount and format the - * disks from within a VM to use them. - */ -export interface DataDisk { - /** - * The lun is used to uniquely identify each data disk. If attaching multiple - * disks, each should have a distinct lun. The value must be between 0 and 63, - * inclusive. - */ - lun: number; - /** - * The default value for caching is readwrite. For information about the caching - * options see: - * https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. 
- * - * Possible values: none, readonly, readwrite - */ - caching?: string; - /** The initial disk size in gigabytes. */ - diskSizeGB: number; - /** - * If omitted, the default is "standard_lrs". - * - * Possible values: standard_lrs, premium_lrs - */ - storageAccountType?: string; -} - -/** The configuration for container-enabled Pools. */ -export interface ContainerConfiguration { - /** - * The container technology to be used. - * - * Possible values: dockerCompatible - */ - type: string; - /** - * This is the full Image reference, as would be specified to "docker pull". An - * Image will be sourced from the default Docker registry unless the Image is - * fully qualified with an alternative registry. - */ - containerImageNames?: string[]; - /** - * If any Images must be downloaded from a private registry which requires - * credentials, then those credentials must be provided here. - */ - containerRegistries?: Array; -} - -/** A private container registry. */ -export interface ContainerRegistry { - /** The user name to log into the registry server. */ - username?: string; - /** The password to log into the registry server. */ - password?: string; - /** If omitted, the default is "docker.io". */ - registryServer?: string; - /** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ - identityReference?: ComputeNodeIdentityReference; -} - -/** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ -export interface ComputeNodeIdentityReference { - /** The ARM resource id of the user assigned identity. */ - resourceId?: string; -} - -/** - * The disk encryption configuration applied on compute nodes in the pool. Disk - * encryption configuration is not supported on Linux pool created with Shared - * Image Gallery Image. - */ -export interface DiskEncryptionConfiguration { - /** - * If omitted, no disks on the compute nodes in the pool will be encrypted. On - * Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" - * and "TemporaryDisk" must be specified. - */ - targets?: string[]; -} - -/** - * For regional placement, nodes in the pool will be allocated in the same region. - * For zonal placement, nodes in the pool will be spread across different zones - * with best effort balancing. - */ -export interface NodePlacementConfiguration { - /** - * Allocation policy used by Batch Service to provision the nodes. If not - * specified, Batch will use the regional policy. - * - * Possible values: regional, zonal - */ - policy?: string; -} - -/** The configuration for virtual machine extensions. */ -export interface VMExtension { - /** The name of the virtual machine extension. */ - name: string; - /** The name of the extension handler publisher. */ - publisher: string; - /** The type of the extension. */ - type: string; - /** The version of script handler. */ - typeHandlerVersion?: string; - /** - * Indicates whether the extension should use a newer minor version if one is - * available at deployment time. Once deployed, however, the extension will not - * upgrade minor versions unless redeployed, even with this property set to true. - */ - autoUpgradeMinorVersion?: boolean; - /** JSON formatted public settings for the extension. */ - settings?: Object; - /** - * The extension can contain either protectedSettings or - * protectedSettingsFromKeyVault or no protected settings at all. 
- */ - protectedSettings?: Object; - /** - * Collection of extension names after which this extension needs to be - * provisioned. - */ - provisionAfterExtensions?: string[]; -} - -export interface Object {} - -/** Settings for the operating system disk of the compute node (VM). */ -export interface OSDisk { - /** - * Specifies the ephemeral Disk Settings for the operating system disk used by the - * compute node (VM). - */ - ephemeralOSDiskSettings?: DiffDiskSettings; -} - -/** - * Specifies the ephemeral Disk Settings for the operating system disk used by the - * compute node (VM). - */ -export interface DiffDiskSettings { - /** - * This property can be used by user in the request to choose the location e.g., - * cache disk space for Ephemeral OS disk provisioning. For more information on - * Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size - * requirements for Windows VMs at - * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - * and Linux VMs at - * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - * - * Possible values: cachedisk - */ - placement?: string; -} - -/** An error that occurred when resizing a Pool. */ -export interface ResizeError { - /** - * An identifier for the Pool resize error. Codes are invariant and are intended - * to be consumed programmatically. - */ - code?: string; - /** - * A message describing the Pool resize error, intended to be suitable for display - * in a user interface. - */ - message?: string; - /** A list of additional error details related to the Pool resize error. */ - values?: Array; -} - -/** Represents a name-value pair. */ -export interface NameValuePair { - /** The name in the name-value pair. */ - name?: string; - /** The value in the name-value pair. */ - value?: string; -} - -/** The results and errors from an execution of a Pool autoscale formula. */ -export interface AutoScaleRun { - /** - * Each variable value is returned in the form $variable=value, and variables are - * separated by semicolons. - */ - results?: string; - /** An error that occurred when executing or evaluating a Pool autoscale formula. */ - error?: AutoScaleRunError; -} - -/** An error that occurred when executing or evaluating a Pool autoscale formula. */ -export interface AutoScaleRunError { - /** - * An identifier for the autoscale error. Codes are invariant and are intended to - * be consumed programmatically. - */ - code?: string; - /** - * A message describing the autoscale error, intended to be suitable for display - * in a user interface. - */ - message?: string; - /** A list of additional error details related to the autoscale error. */ - values?: Array; -} - -/** The network configuration for a Pool. */ -export interface NetworkConfiguration { - /** - * The virtual network must be in the same region and subscription as the Azure - * Batch Account. The specified subnet should have enough free IP addresses to - * accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have - * enough free IP addresses, the Pool will partially allocate Nodes and a resize - * error will occur. The 'MicrosoftAzureBatch' service principal must have the - * 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for - * the specified VNet. The specified subnet must allow communication from the - * Azure Batch service to be able to schedule Tasks on the Nodes. 
This can be - * verified by checking if the specified VNet has any associated Network Security - * Groups (NSG). If communication to the Nodes in the specified subnet is denied - * by an NSG, then the Batch service will set the state of the Compute Nodes to - * unusable. For Pools created with virtualMachineConfiguration only ARM virtual - * networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools - * created with cloudServiceConfiguration both ARM and classic virtual networks - * are supported. If the specified VNet has any associated Network Security Groups - * (NSG), then a few reserved system ports must be enabled for inbound - * communication. For Pools created with a virtual machine configuration, enable - * ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. - * For Pools created with a cloud service configuration, enable ports 10100, - * 20100, and 30100. Also enable outbound connections to Azure Storage on port - * 443. For more details see: - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - */ - subnetId?: string; - /** - * The scope of dynamic vnet assignment. - * - * Possible values: none, job - */ - dynamicVNetAssignmentScope?: string; - /** - * Pool endpoint configuration is only supported on Pools with the - * virtualMachineConfiguration property. - */ - endpointConfiguration?: PoolEndpointConfiguration; - /** - * Public IP configuration property is only supported on Pools with the - * virtualMachineConfiguration property. - */ - publicIPAddressConfiguration?: PublicIPAddressConfiguration; -} - -/** The endpoint configuration for a Pool. */ -export interface PoolEndpointConfiguration { - /** - * The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum - * number of inbound NAT Pools is exceeded the request fails with HTTP status code - * 400. This cannot be specified if the IPAddressProvisioningType is - * NoPublicIPAddresses. - */ - inboundNATPools: Array; -} - -/** - * A inbound NAT Pool that can be used to address specific ports on Compute Nodes - * in a Batch Pool externally. - */ -export interface InboundNATPool { - /** - * The name must be unique within a Batch Pool, can contain letters, numbers, - * underscores, periods, and hyphens. Names must start with a letter or number, - * must end with a letter, number, or underscore, and cannot exceed 77 characters. - * If any invalid values are provided the request fails with HTTP status code - * 400. - */ - name: string; - /** - * The protocol of the endpoint. - * - * Possible values: tcp, udp - */ - protocol: string; - /** - * This must be unique within a Batch Pool. Acceptable values are between 1 and - * 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any - * reserved values are provided the request fails with HTTP status code 400. - */ - backendPort: number; - /** - * Acceptable values range between 1 and 65534 except ports from 50000 to 55000 - * which are reserved. All ranges within a Pool must be distinct and cannot - * overlap. Each range must contain at least 40 ports. If any reserved or - * overlapping values are provided the request fails with HTTP status code 400. - */ - frontendPortRangeStart: number; - /** - * Acceptable values range between 1 and 65534 except ports from 50000 to 55000 - * which are reserved by the Batch service. All ranges within a Pool must be - * distinct and cannot overlap. Each range must contain at least 40 ports. 
If any - * reserved or overlapping values are provided the request fails with HTTP status - * code 400. - */ - frontendPortRangeEnd: number; - /** - * The maximum number of rules that can be specified across all the endpoints on a - * Batch Pool is 25. If no network security group rules are specified, a default - * rule will be created to allow inbound access to the specified backendPort. If - * the maximum number of network security group rules is exceeded the request - * fails with HTTP status code 400. - */ - networkSecurityGroupRules?: Array; -} - -/** A network security group rule to apply to an inbound endpoint. */ -export interface NetworkSecurityGroupRule { - /** - * Priorities within a Pool must be unique and are evaluated in order of priority. - * The lower the number the higher the priority. For example, rules could be - * specified with order numbers of 150, 250, and 350. The rule with the order - * number of 150 takes precedence over the rule that has an order of 250. Allowed - * priorities are 150 to 4096. If any reserved or duplicate values are provided - * the request fails with HTTP status code 400. - */ - priority: number; - /** - * The action that should be taken for a specified IP address, subnet range or tag. - * - * Possible values: allow, deny - */ - access: string; - /** - * Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. - * 192.168.1.0/24), default tag, or * (for all addresses). If any other values - * are provided the request fails with HTTP status code 400. - */ - sourceAddressPrefix: string; - /** - * Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a - * port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each - * entry in this collection must not overlap any other entry (either a range or an - * individual port). If any other values are provided the request fails with HTTP - * status code 400. The default value is '*'. - */ - sourcePortRanges?: string[]; -} - -/** The public IP Address configuration of the networking configuration of a Pool. */ -export interface PublicIPAddressConfiguration { - /** - * The default value is BatchManaged. - * - * Possible values: batchmanaged, usermanaged, nopublicipaddresses - */ - provision?: string; - /** - * The number of IPs specified here limits the maximum size of the Pool - 100 - * dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public - * IP. For example, a pool needing 250 dedicated VMs would need at least 3 public - * IPs specified. Each element of this collection is of the form: - * /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - */ - ipAddressIds?: string[]; -} - -/** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. 
Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ -export interface StartTask { - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ - containerSettings?: TaskContainerSettings; - /** Files listed under this element are located in the Task's working directory. */ - resourceFiles?: Array; - /** A list of environment variable settings for the StartTask. */ - environmentSettings?: Array; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ - userIdentity?: UserIdentity; - /** - * The Batch service retries a Task if its exit code is nonzero. Note that this - * value specifically controls the number of retries. The Batch service will try - * the Task once, and may then retry up to this limit. For example, if the maximum - * retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 - * retries). If the maximum retry count is 0, the Batch service does not retry the - * Task. If the maximum retry count is -1, the Batch service retries the Task - * without limit, however this is not recommended for a start task or any task. - * The default value is 0 (no retries) - */ - maxTaskRetryCount?: number; - /** - * If true and the StartTask fails on a Node, the Batch service retries the - * StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has - * still not completed successfully after all retries, then the Batch service - * marks the Node unusable, and will not schedule Tasks to it. This condition can - * be detected via the Compute Node state and failure info details. If false, the - * Batch service will not wait for the StartTask to complete. In this case, other - * Tasks can start executing on the Compute Node while the StartTask is still - * running; and even if the StartTask fails, new Tasks will continue to be - * scheduled on the Compute Node. The default is true. - */ - waitForSuccess?: boolean; -} - -/** The container settings for a Task. */ -export interface TaskContainerSettings { - /** - * These additional options are supplied as arguments to the "docker create" - * command, in addition to those controlled by the Batch Service. - */ - containerRunOptions?: string; - /** - * This is the full Image reference, as would be specified to "docker pull". 
If - * no tag is provided as part of the Image name, the tag ":latest" is used as a - * default. - */ - imageName: string; - /** This setting can be omitted if was already provided at Pool creation. */ - registry?: ContainerRegistry; - /** - * The default is 'taskWorkingDirectory'. - * - * Possible values: taskWorkingDirectory, containerImageDefault - */ - workingDirectory?: string; -} - -/** A single file or multiple files to be downloaded to a Compute Node. */ -export interface ResourceFile { - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. - */ - autoStorageContainerName?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. This URL must be readable - * and listable from compute nodes. There are three ways to get such a URL for a - * container in Azure storage: include a Shared Access Signature (SAS) granting - * read and list permissions on the container, use a managed identity with read - * and list permissions, or set the ACL for the container to allow public access. - */ - storageContainerUrl?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. If the URL points to - * Azure Blob Storage, it must be readable from compute nodes. There are three - * ways to get such a URL for a blob in Azure storage: include a Shared Access - * Signature (SAS) granting read permissions on the blob, use a managed identity - * with read permission, or set the ACL for the blob or its container to allow - * public access. - */ - httpUrl?: string; - /** - * The property is valid only when autoStorageContainerName or storageContainerUrl - * is used. This prefix can be a partial filename or a subdirectory. If a prefix - * is not specified, all the files in the container will be downloaded. - */ - blobPrefix?: string; - /** - * If the httpUrl property is specified, the filePath is required and describes - * the path which the file will be downloaded to, including the filename. - * Otherwise, if the autoStorageContainerName or storageContainerUrl property is - * specified, filePath is optional and is the directory to download the files to. - * In the case where filePath is used as a directory, any directory structure - * already associated with the input data will be retained in full and appended to - * the specified filePath directory. The specified relative path cannot break out - * of the Task's working directory (for example by using '..'). - */ - filePath?: string; - /** - * This property applies only to files being downloaded to Linux Compute Nodes. It - * will be ignored if it is specified for a resourceFile which will be downloaded - * to a Windows Compute Node. If this property is not specified for a Linux - * Compute Node, then a default value of 0770 is applied to the file. - */ - fileMode?: string; - /** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ - identityReference?: ComputeNodeIdentityReference; -} - -/** An environment variable to be set on a Task process. */ -export interface EnvironmentSetting { - /** The name of the environment variable. */ - name: string; - /** The value of the environment variable. */ - value?: string; -} - -/** Specify either the userName or autoUser property, but not both. 
*/ -export interface UserIdentity { - /** - * The userName and autoUser properties are mutually exclusive; you must specify - * one but not both. - */ - username?: string; - /** - * The userName and autoUser properties are mutually exclusive; you must specify - * one but not both. - */ - autoUser?: AutoUserSpecification; -} - -/** - * Specifies the parameters for the auto user that runs a Task on the Batch - * service. - */ -export interface AutoUserSpecification { - /** - * The default value is pool. If the pool is running Windows a value of Task - * should be specified if stricter isolation between tasks is required. For - * example, if the task mutates the registry in a way which could impact other - * tasks, or if certificates have been specified on the pool which should not be - * accessible by normal tasks but should be accessible by StartTasks. - * - * Possible values: task, pool - */ - scope?: string; - /** - * The default value is nonAdmin. - * - * Possible values: nonadmin, admin - */ - elevationLevel?: string; -} - -/** A reference to a Certificate to be installed on Compute Nodes in a Pool. */ -export interface CertificateReference { - /** The thumbprint of the Certificate. */ - thumbprint: string; - /** The algorithm with which the thumbprint is associated. This must be sha1. */ - thumbprintAlgorithm: string; - /** - * The default value is currentuser. This property is applicable only for Pools - * configured with Windows Compute Nodes (that is, created with - * cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows - * Image reference). For Linux Compute Nodes, the Certificates are stored in a - * directory inside the Task working directory and an environment variable - * AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - * For Certificates with visibility of 'remoteUser', a 'certs' directory is - * created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. - * - * Possible values: currentuser, localmachine - */ - storeLocation?: string; - /** - * This property is applicable only for Pools configured with Windows Compute - * Nodes (that is, created with cloudServiceConfiguration, or with - * virtualMachineConfiguration using a Windows Image reference). Common store - * names include: My, Root, CA, Trust, Disallowed, TrustedPeople, - * TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be - * used. The default value is My. - */ - storeName?: string; - /** - * You can specify more than one visibility in this collection. The default is all - * Accounts. - */ - visibility?: string[]; -} - -/** A reference to an Package to be deployed to Compute Nodes. */ -export interface ApplicationPackageReference { - /** - * When creating a pool, the package's application ID must be fully qualified - * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - */ - applicationId: string; - /** - * If this is omitted on a Pool, and no default version is specified for this - * application, the request fails with the error code - * InvalidApplicationPackageReferences and HTTP status code 409. If this is - * omitted on a Task, and no default version is specified for this application, - * the Task fails with a pre-processing error. - */ - version?: string; -} - -/** Specifies how Tasks should be distributed across Compute Nodes. 
*/ -export interface TaskSchedulingPolicy { - /** - * If not specified, the default is spread. - * - * Possible values: spread, pack - */ - nodeFillType: string; -} - -/** - * Properties used to create a user used to execute Tasks on an Azure Batch - * Compute Node. - */ -export interface UserAccount { - /** - * The name of the user Account. Names can contain any Unicode characters up to a - * maximum length of 20. - */ - name: string; - /** The password for the user Account. */ - password: string; - /** - * The default value is nonAdmin. - * - * Possible values: nonadmin, admin - */ - elevationLevel?: string; - /** - * This property is ignored if specified on a Windows Pool. If not specified, the - * user is created with the default options. - */ - linuxUserConfiguration?: LinuxUserConfiguration; - /** - * This property can only be specified if the user is on a Windows Pool. If not - * specified and on a Windows Pool, the user is created with the default options. - */ - windowsUserConfiguration?: WindowsUserConfiguration; -} - -/** Properties used to create a user Account on a Linux Compute Node. */ -export interface LinuxUserConfiguration { - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the uid. - */ - uid?: number; - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the gid. - */ - gid?: number; - /** - * The private key must not be password protected. The private key is used to - * automatically configure asymmetric-key based authentication for SSH between - * Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication - * property is true (it is ignored if enableInterNodeCommunication is false). It - * does this by placing the key pair into the user's .ssh directory. If not - * specified, password-less SSH is not configured between Compute Nodes (no - * modification of the user's .ssh directory is done). - */ - sshPrivateKey?: string; -} - -/** Properties used to create a user Account on a Windows Compute Node. */ -export interface WindowsUserConfiguration { - /** - * The default value for VirtualMachineConfiguration Pools is 'batch' and for - * CloudServiceConfiguration Pools is 'interactive'. - * - * Possible values: batch, interactive - */ - loginMode?: string; -} - -/** - * The Batch service does not assign any meaning to this metadata; it is solely - * for the use of user code. - */ -export interface MetadataItem { - /** The name of the metadata item. */ - name: string; - /** The value of the metadata item. */ - value: string; -} - -/** The file system to mount on each node. */ -export interface MountConfiguration { - /** This property is mutually exclusive with all other properties. */ - azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration; - /** This property is mutually exclusive with all other properties. */ - nfsMountConfiguration?: NFSMountConfiguration; - /** This property is mutually exclusive with all other properties. */ - cifsMountConfiguration?: CifsMountConfiguration; - /** This property is mutually exclusive with all other properties. */ - azureFileShareConfiguration?: AzureFileShareConfiguration; -} - -/** Information used to connect to an Azure Storage Container using Blobfuse. */ -export interface AzureBlobFileSystemConfiguration { - /** The Azure Storage Account name. */ - accountName: string; - /** The Azure Blob Storage Container name. 
*/ - containerName: string; - /** - * This property is mutually exclusive with both sasKey and identity; exactly one - * must be specified. - */ - accountKey?: string; - /** - * This property is mutually exclusive with both accountKey and identity; exactly - * one must be specified. - */ - sasKey?: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - blobfuseOptions?: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** - * This property is mutually exclusive with both accountKey and sasKey; exactly - * one must be specified. - */ - identityReference?: ComputeNodeIdentityReference; -} - -/** Information used to connect to an NFS file system. */ -export interface NFSMountConfiguration { - /** The URI of the file system to mount. */ - source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; -} - -/** Information used to connect to a CIFS file system. */ -export interface CifsMountConfiguration { - /** The user to use for authentication against the CIFS file system. */ - username: string; - /** The URI of the file system to mount. */ - source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; - /** The password to use for authentication against the CIFS file system. */ - password: string; -} - -/** Information used to connect to an Azure Fileshare. */ -export interface AzureFileShareConfiguration { - /** The Azure Storage account name. */ - accountName: string; - /** This is of the form 'https://{account}.file.core.windows.net/'. */ - azureFileUrl: string; - /** The Azure Storage account key. */ - accountKey: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; -} - -/** The identity of the Batch pool, if configured. */ -export interface BatchPoolIdentity { - /** - * The list of user identities associated with the Batch pool. The user identity - * dictionary key references will be ARM resource ids in the form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - * - * Possible values: UserAssigned, None - */ - type: string; - /** - * The user identity dictionary key references will be ARM resource ids in the - * form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - */ - userAssignedIdentities?: Array; -} - -/** The user assigned Identity */ -export interface UserAssignedIdentity { - /** The ARM resource id of the user assigned identity */ - resourceId: string; -} - -/** Options for enabling automatic scaling on a Pool. 
*/ -export interface BatchPoolEnableAutoScaleParameters { - /** - * The formula is checked for validity before it is applied to the Pool. If the - * formula is not valid, the Batch service rejects the request with detailed error - * information. For more information about specifying this formula, see - * Automatically scale Compute Nodes in an Azure Batch Pool - * (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - */ - autoScaleFormula?: string; - /** - * The default value is 15 minutes. The minimum and maximum value are 5 minutes - * and 168 hours respectively. If you specify a value less than 5 minutes or - * greater than 168 hours, the Batch service rejects the request with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). If you specify a new interval, then the existing - * autoscale evaluation schedule will be stopped and a new autoscale evaluation - * schedule will be started, with its starting time being the time when this - * request was issued. - */ - autoScaleEvaluationInterval?: string; -} - -/** Options for evaluating an automatic scaling formula on a Pool. */ -export interface BatchPoolEvaluateAutoScaleParameters { - /** - * The formula is validated and its results calculated, but it is not applied to - * the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a - * Pool'. For more information about specifying this formula, see Automatically - * scale Compute Nodes in an Azure Batch Pool - * (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - */ - autoScaleFormula: string; -} - -/** Options for changing the size of a Pool. */ -export interface BatchPoolResizeParameters { - /** The desired number of dedicated Compute Nodes in the Pool. */ - targetDedicatedNodes?: number; - /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ - targetLowPriorityNodes?: number; - /** - * The default value is 15 minutes. The minimum value is 5 minutes. If you specify - * a value less than 5 minutes, the Batch service returns an error; if you are - * calling the REST API directly, the HTTP status code is 400 (Bad Request). - */ - resizeTimeout?: string; - /** - * The default value is requeue. - * - * Possible values: requeue, terminate, taskcompletion, retaineddata - */ - nodeDeallocationOption?: string; -} - -/** Options for removing Compute Nodes from a Pool. */ -export interface NodeRemoveParameters { - /** A maximum of 100 nodes may be removed per request. */ - nodeList: string[]; - /** - * The default value is 15 minutes. The minimum value is 5 minutes. If you specify - * a value less than 5 minutes, the Batch service returns an error; if you are - * calling the REST API directly, the HTTP status code is 400 (Bad Request). - */ - resizeTimeout?: string; - /** - * The default value is requeue. - * - * Possible values: requeue, terminate, taskcompletion, retaineddata - */ - nodeDeallocationOption?: string; -} - -/** Resource usage statistics for a Job. */ -export interface JobStatistics { - /** The start time of the time range covered by the statistics. */ - startTime: Date | string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: Date | string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in the Job. 
- */ - userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in the Job. - */ - kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on - * a Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If a Task was retried, this - * includes the wall clock time of all the Task retries. - */ - wallClockTime: string; - /** The total number of disk read operations made by all Tasks in the Job. */ - readIOps: number; - /** The total number of disk write operations made by all Tasks in the Job. */ - writeIOps: number; - /** The total amount of data in GiB read from disk by all Tasks in the Job. */ - readIOGiB: number; - /** The total amount of data in GiB written to disk by all Tasks in the Job. */ - writeIOGiB: number; - /** A Task completes successfully if it returns exit code 0. */ - numSucceededTasks: number; - /** - * A Task fails if it exhausts its maximum retry count without returning exit code - * 0. - */ - numFailedTasks: number; - /** - * The total number of retries on all the Tasks in the Job during the given time - * range. - */ - numTaskRetries: number; - /** - * The wait time for a Task is defined as the elapsed time between the creation of - * the Task and the start of Task execution. (If the Task is retried due to - * failures, the wait time is the time to the most recent Task execution.) This - * value is only reported in the Account lifetime statistics; it is not included - * in the Job statistics. - */ - waitTime: string; -} - -/** An Azure Batch Job. */ -export interface BatchJob { - /** - * The ID is case-preserving and case-insensitive (that is, you may not have two - * IDs within an Account that differ only by case). - */ - id?: string; - /** The display name for the Job. */ - displayName?: string; - /** - * Whether Tasks in the Job can define dependencies on each other. The default is - * false. - */ - usesTaskDependencies?: boolean; - /** - * Priority values can range from -1000 to 1000, with -1000 being the lowest - * priority and 1000 being the highest priority. The default value is 0. - */ - priority?: number; - /** - * If the value is set to True, other high priority jobs submitted to the system - * will take precedence and will be able requeue tasks from this job. You can - * update a job's allowTaskPreemption after it has been created using the update - * job API. - */ - allowTaskPreemption?: boolean; - /** - * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not - * specified, the default value is -1, which means there's no limit to the number - * of tasks that can be run at once. You can update a job's maxParallelTasks after - * it has been created using the update job API. - */ - maxParallelTasks?: number; - /** The execution constraints for a Job. */ - constraints?: JobConstraints; - /** - * The Job Manager Task is automatically started when the Job is created. The - * Batch service tries to schedule the Job Manager Task before any other Tasks in - * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where - * Job Manager Tasks are running for as long as possible (that is, Compute Nodes - * running 'normal' Tasks are removed before Compute Nodes running Job Manager - * Tasks). When a Job Manager Task fails and needs to be restarted, the system - * tries to schedule it at the highest priority. 
If there are no idle Compute - * Nodes available, the system may terminate one of the running Tasks in the Pool - * and return it to the queue in order to make room for the Job Manager Task to - * restart. Note that a Job Manager Task in one Job does not have priority over - * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For - * example, if a Job Manager in a priority 0 Job needs to be restarted, it will - * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery - * operation is triggered on a Node. Examples of recovery operations include (but - * are not limited to) when an unhealthy Node is rebooted or a Compute Node - * disappeared due to host failure. Retries due to recovery operations are - * independent of and are not counted against the maxTaskRetryCount. Even if the - * maxTaskRetryCount is 0, an internal retry due to a recovery operation may - * occur. Because of this, all Tasks should be idempotent. This means Tasks need - * to tolerate being interrupted and restarted without causing any corruption or - * duplicate data. The best practice for long running Tasks is to use some form of - * checkpointing. - */ - jobManagerTask?: JobManagerTask; - /** - * The Job Preparation Task is a special Task run on each Compute Node before any - * other Task of the Job. - */ - jobPreparationTask?: JobPreparationTask; - /** - * The Job Release Task is a special Task run at the end of the Job on each - * Compute Node that has run any other Task of the Job. - */ - jobReleaseTask?: JobReleaseTask; - /** - * Individual Tasks can override an environment setting specified here by - * specifying the same setting name with a different value. - */ - commonEnvironmentSettings?: Array; - /** Specifies how a Job should be assigned to a Pool. */ - poolInfo?: PoolInformation; - /** - * The default is noaction. - * - * Possible values: noaction, terminatejob - */ - onAllTasksComplete?: string; - /** - * A Task is considered to have failed if has a failureInfo. A failureInfo is set - * if the Task completes with a non-zero exit code after exhausting its retry - * count, or if there was an error starting the Task, for example due to a - * resource file download error. The default is noaction. - * - * Possible values: noaction, performexitoptionsjobaction - */ - onTaskFailure?: string; - /** The network configuration for the Job. */ - networkConfiguration?: JobNetworkConfiguration; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ - metadata?: Array; -} - -/** The execution constraints for a Job. */ -export interface JobConstraints { - /** - * If the Job does not complete within the time limit, the Batch service - * terminates it and any Tasks that are still running. In this case, the - * termination reason will be MaxWallClockTimeExpiry. If this property is not - * specified, there is no time limit on how long the Job may run. - */ - maxWallClockTime?: string; - /** - * Note that this value specifically controls the number of retries. The Batch - * service will try each Task once, and may then retry up to this limit. For - * example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one - * initial try and 3 retries). If the maximum retry count is 0, the Batch service - * does not retry Tasks. If the maximum retry count is -1, the Batch service - * retries the Task without limit, however this is not recommended for a start - * task or any task. 
The default value is 0 (no retries) - */ - maxTaskRetryCount?: number; -} - -/** - * The Job Manager Task is automatically started when the Job is created. The - * Batch service tries to schedule the Job Manager Task before any other Tasks in - * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where - * Job Manager Tasks are running for as long as possible (that is, Compute Nodes - * running 'normal' Tasks are removed before Compute Nodes running Job Manager - * Tasks). When a Job Manager Task fails and needs to be restarted, the system - * tries to schedule it at the highest priority. If there are no idle Compute - * Nodes available, the system may terminate one of the running Tasks in the Pool - * and return it to the queue in order to make room for the Job Manager Task to - * restart. Note that a Job Manager Task in one Job does not have priority over - * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For - * example, if a Job Manager in a priority 0 Job needs to be restarted, it will - * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery - * operation is triggered on a Node. Examples of recovery operations include (but - * are not limited to) when an unhealthy Node is rebooted or a Compute Node - * disappeared due to host failure. Retries due to recovery operations are - * independent of and are not counted against the maxTaskRetryCount. Even if the - * maxTaskRetryCount is 0, an internal retry due to a recovery operation may - * occur. Because of this, all Tasks should be idempotent. This means Tasks need - * to tolerate being interrupted and restarted without causing any corruption or - * duplicate data. The best practice for long running Tasks is to use some form of - * checkpointing. - */ -export interface JobManagerTask { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. - */ - id: string; - /** - * It need not be unique and can contain any Unicode characters up to a maximum - * length of 1024. - */ - displayName?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine: string; - /** - * If the Pool that will run this Task has containerConfiguration set, this must - * be set as well. If the Pool that will run this Task doesn't have - * containerConfiguration set, this must not be set. When this is specified, all - * directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure - * Batch directories on the node) are mapped into the container, all Task - * environment variables are mapped into the container, and the Task command line - * is executed in the container. Files produced in the container outside of - * AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that - * Batch file APIs will not be able to access those files. 
- */ - containerSettings?: TaskContainerSettings; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ - resourceFiles?: Array; - /** - * For multi-instance Tasks, the files will only be uploaded from the Compute Node - * on which the primary Task is executed. - */ - outputFiles?: Array; - /** A list of environment variable settings for the Job Manager Task. */ - environmentSettings?: Array; - /** Execution constraints to apply to a Task. */ - constraints?: TaskConstraints; - /** - * The default is 1. A Task can only be scheduled to run on a compute node if the - * node has enough free scheduling slots available. For multi-instance Tasks, this - * property is not supported and must not be specified. - */ - requiredSlots?: number; - /** - * If true, when the Job Manager Task completes, the Batch service marks the Job - * as complete. If any Tasks are still running at this time (other than Job - * Release), those Tasks are terminated. If false, the completion of the Job - * Manager Task does not affect the Job status. In this case, you should either - * use the onAllTasksComplete attribute to terminate the Job, or have a client or - * user terminate the Job explicitly. An example of this is if the Job Manager - * creates a set of Tasks but then takes no further role in their execution. The - * default value is true. If you are using the onAllTasksComplete and - * onTaskFailure attributes to control Job lifetime, and using the Job Manager - * Task only to create the Tasks for the Job (not to monitor progress), then it is - * important to set killJobOnCompletion to false. - */ - killJobOnCompletion?: boolean; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ - userIdentity?: UserIdentity; - /** - * If true, no other Tasks will run on the same Node for as long as the Job - * Manager is running. If false, other Tasks can run simultaneously with the Job - * Manager on a Compute Node. The Job Manager Task counts normally against the - * Compute Node's concurrent Task limit, so this is only relevant if the Compute - * Node allows multiple concurrent Tasks. The default value is true. - */ - runExclusive?: boolean; - /** - * Application Packages are downloaded and deployed to a shared directory, not the - * Task working directory. Therefore, if a referenced Application Package is - * already on the Compute Node, and is up to date, then it is not re-downloaded; - * the existing copy on the Compute Node is used. If a referenced Application - * Package cannot be installed, for example because the package has been deleted - * or because download failed, the Task fails. - */ - applicationPackageReferences?: Array; - /** - * If this property is set, the Batch service provides the Task with an - * authentication token which can be used to authenticate Batch service operations - * without requiring an Account access key. The token is provided via the - * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the - * Task can carry out using the token depend on the settings. 
For example, a Task - * can request Job permissions in order to add other Tasks to the Job, or check - * the status of the Job or of other Tasks under the Job. - */ - authenticationTokenSettings?: AuthenticationTokenSettings; - /** The default value is true. */ - allowLowPriorityNode?: boolean; -} - -/** - * On every file uploads, Batch service writes two log files to the compute node, - * 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn - * more about a specific failure. - */ -export interface OutputFile { - /** - * Both relative and absolute paths are supported. Relative paths are relative to - * the Task working directory. The following wildcards are supported: * matches 0 - * or more characters (for example pattern abc* would match abc or abcdef), ** - * matches any directory, ? matches any single character, [abc] matches one - * character in the brackets, and [a-c] matches one character in the range. - * Brackets can include a negation to match any character not specified (for - * example [!abc] matches any character but a, b, or c). If a file name starts - * with "." it is ignored by default but may be matched by specifying it - * explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple - * example: **\*.txt matches any file that does not start in '.' and ends with - * .txt in the Task working directory or any subdirectory. If the filename - * contains a wildcard character it can be escaped using brackets (for example - * abc[*] would match a file named abc*). Note that both \ and / are treated as - * directory separators on Windows, but only / is on Linux. Environment variables - * (%var% on Windows or $var on Linux) are expanded prior to the pattern being - * applied. - */ - filePattern: string; - /** The destination to which a file should be uploaded. */ - destination: OutputFileDestination; - /** - * Details about an output file upload operation, including under what conditions - * to perform the upload. - */ - uploadOptions: OutputFileUploadOptions; -} - -/** The destination to which a file should be uploaded. */ -export interface OutputFileDestination { - /** Specifies a file upload destination within an Azure blob storage container. */ - container?: OutputFileBlobContainerDestination; -} - -/** Specifies a file upload destination within an Azure blob storage container. */ -export interface OutputFileBlobContainerDestination { - /** - * If filePattern refers to a specific file (i.e. contains no wildcards), then - * path is the name of the blob to which to upload that file. If filePattern - * contains one or more wildcards (and therefore may match multiple files), then - * path is the name of the blob virtual directory (which is prepended to each blob - * name) to which to upload the file(s). If omitted, file(s) are uploaded to the - * root of the container with a blob name matching their file name. - */ - path?: string; - /** - * If not using a managed identity, the URL must include a Shared Access Signature - * (SAS) granting write permissions to the container. - */ - containerUrl: string; - /** The identity must have write access to the Azure Blob Storage container */ - identityReference?: ComputeNodeIdentityReference; - /** - * These headers will be specified when uploading files to Azure Storage. 
Official - * document on allowed headers when uploading blobs: - * https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types - */ - uploadHeaders?: Array; -} - -/** An HTTP header name-value pair */ -export interface HttpHeader { - /** The case-insensitive name of the header to be used while uploading output files */ - name: string; - /** The value of the header to be used while uploading output files */ - value?: string; -} - -/** - * Details about an output file upload operation, including under what conditions - * to perform the upload. - */ -export interface OutputFileUploadOptions { - /** - * The default is taskcompletion. - * - * Possible values: tasksuccess, taskfailure, taskcompletion - */ - uploadCondition: string; -} - -/** Execution constraints to apply to a Task. */ -export interface TaskConstraints { - /** If this is not specified, there is no time limit on how long the Task may run. */ - maxWallClockTime?: string; - /** - * The default is 7 days, i.e. the Task directory will be retained for 7 days - * unless the Compute Node is removed or the Job is deleted. - */ - retentionTime?: string; - /** - * Note that this value specifically controls the number of retries for the Task - * executable due to a nonzero exit code. The Batch service will try the Task - * once, and may then retry up to this limit. For example, if the maximum retry - * count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). - * If the maximum retry count is 0, the Batch service does not retry the Task - * after the first attempt. If the maximum retry count is -1, the Batch service - * retries the Task without limit, however this is not recommended for a start - * task or any task. The default value is 0 (no retries) - */ - maxTaskRetryCount?: number; -} - -/** - * The settings for an authentication token that the Task can use to perform Batch - * service operations. - */ -export interface AuthenticationTokenSettings { - /** - * The authentication token grants access to a limited set of Batch service - * operations. Currently the only supported value for the access property is - * 'job', which grants access to all operations related to the Job which contains - * the Task. - */ - access?: string[]; -} - -/** - * You can use Job Preparation to prepare a Node to run Tasks for the Job. - * Activities commonly performed in Job Preparation include: Downloading common - * resource files used by all the Tasks in the Job. The Job Preparation Task can - * download these common resource files to the shared location on the Node. - * (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the Node so - * that all Tasks of that Job can communicate with it. If the Job Preparation Task - * fails (that is, exhausts its retry count before exiting with exit code 0), - * Batch will not run Tasks of this Job on the Node. The Compute Node remains - * ineligible to run Tasks of this Job until it is reimaged. The Compute Node - * remains active and can be used for other Jobs. The Job Preparation Task can run - * multiple times on the same Node. Therefore, you should write the Job - * Preparation Task to handle re-execution. If the Node is rebooted, the Job - * Preparation Task is run again on the Compute Node before scheduling any other - * Task of the Job, if rerunOnNodeRebootAfterSuccess is true or if the Job - * Preparation Task did not previously complete. 
If the Node is reimaged, the Job - * Preparation Task is run again before scheduling any Task of the Job. Batch will - * retry Tasks when a recovery operation is triggered on a Node. Examples of - * recovery operations include (but are not limited to) when an unhealthy Node is - * rebooted or a Compute Node disappeared due to host failure. Retries due to - * recovery operations are independent of and are not counted against the - * maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to - * a recovery operation may occur. Because of this, all Tasks should be - * idempotent. This means Tasks need to tolerate being interrupted and restarted - * without causing any corruption or duplicate data. The best practice for long - * running Tasks is to use some form of checkpointing. - */ -export interface JobPreparationTask { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. If you do not - * specify this property, the Batch service assigns a default value of - * 'jobpreparation'. No other Task in the Job can have the same ID as the Job - * Preparation Task. If you try to submit a Task with the same id, the Batch - * service rejects the request with error code TaskIdSameAsJobPreparationTask; if - * you are calling the REST API directly, the HTTP status code is 409 (Conflict). - */ - id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ - containerSettings?: TaskContainerSettings; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ - resourceFiles?: Array; - /** A list of environment variable settings for the Job Preparation Task. */ - environmentSettings?: Array; - /** Execution constraints to apply to a Task. */ - constraints?: TaskConstraints; - /** - * If true and the Job Preparation Task fails on a Node, the Batch service retries - * the Job Preparation Task up to its maximum retry count (as specified in the - * constraints element). 
If the Task has still not completed successfully after - * all retries, then the Batch service will not schedule Tasks of the Job to the - * Node. The Node remains active and eligible to run Tasks of other Jobs. If - * false, the Batch service will not wait for the Job Preparation Task to - * complete. In this case, other Tasks of the Job can start executing on the - * Compute Node while the Job Preparation Task is still running; and even if the - * Job Preparation Task fails, new Tasks will continue to be scheduled on the - * Compute Node. The default value is true. - */ - waitForSuccess?: boolean; - /** - * If omitted, the Task runs as a non-administrative user unique to the Task on - * Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux - * Compute Nodes. - */ - userIdentity?: UserIdentity; - /** - * The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - * the Job Preparation Task did not complete (e.g. because the reboot occurred - * while the Task was running). Therefore, you should always write a Job - * Preparation Task to be idempotent and to behave correctly if run multiple - * times. The default value is true. - */ - rerunOnNodeRebootAfterSuccess?: boolean; -} - -/** - * The Job Release Task runs when the Job ends, because of one of the following: - * The user calls the Terminate Job API, or the Delete Job API while the Job is - * still active, the Job's maximum wall clock time constraint is reached, and the - * Job is still active, or the Job's Job Manager Task completed, and the Job is - * configured to terminate when the Job Manager completes. The Job Release Task - * runs on each Node where Tasks of the Job have run and the Job Preparation Task - * ran and completed. If you reimage a Node after it has run the Job Preparation - * Task, and the Job ends without any further Tasks of the Job running on that - * Node (and hence the Job Preparation Task does not re-run), then the Job Release - * Task does not run on that Compute Node. If a Node reboots while the Job Release - * Task is still running, the Job Release Task runs again when the Compute Node - * starts up. The Job is not marked as complete until all Job Release Tasks have - * completed. The Job Release Task runs in the background. It does not occupy a - * scheduling slot; that is, it does not count towards the taskSlotsPerNode limit - * specified on the Pool. - */ -export interface JobReleaseTask { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. If you do not - * specify this property, the Batch service assigns a default value of - * 'jobrelease'. No other Task in the Job can have the same ID as the Job Release - * Task. If you try to submit a Task with the same id, the Batch service rejects - * the request with error code TaskIdSameAsJobReleaseTask; if you are calling the - * REST API directly, the HTTP status code is 409 (Conflict). - */ - id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. 
If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ - containerSettings?: TaskContainerSettings; - /** Files listed under this element are located in the Task's working directory. */ - resourceFiles?: Array; - /** A list of environment variable settings for the Job Release Task. */ - environmentSettings?: Array; - /** - * The maximum elapsed time that the Job Release Task may run on a given Compute - * Node, measured from the time the Task starts. If the Task does not complete - * within the time limit, the Batch service terminates it. The default value is 15 - * minutes. You may not specify a timeout longer than 15 minutes. If you do, the - * Batch service rejects it with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). - */ - maxWallClockTime?: string; - /** - * The default is 7 days, i.e. the Task directory will be retained for 7 days - * unless the Compute Node is removed or the Job is deleted. - */ - retentionTime?: string; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ - userIdentity?: UserIdentity; -} - -/** Specifies how a Job should be assigned to a Pool. */ -export interface PoolInformation { - /** - * You must ensure that the Pool referenced by this property exists. If the Pool - * does not exist at the time the Batch service tries to schedule a Job, no Tasks - * for the Job will run until you create a Pool with that id. Note that the Batch - * service will not reject the Job request; it will simply not run Tasks until the - * Pool exists. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ - poolId?: string; - /** - * If auto Pool creation fails, the Batch service moves the Job to a completed - * state, and the Pool creation error is set in the Job's scheduling error - * property. The Batch service manages the lifetime (both creation and, unless - * keepAlive is specified, deletion) of the auto Pool. Any user actions that - * affect the lifetime of the auto Pool while the Job is active will result in - * unexpected behavior. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ - autoPoolSpecification?: AutoPoolSpecification; -} - -/** - * Specifies characteristics for a temporary 'auto pool'. The Batch service will - * create this auto Pool when the Job is submitted. - */ -export interface AutoPoolSpecification { - /** - * The Batch service assigns each auto Pool a unique identifier on creation. To - * distinguish between Pools created for different purposes, you can specify this - * element to add a prefix to the ID that is assigned. The prefix can be up to 20 - * characters long. 
- */ - autoPoolIdPrefix?: string; - /** - * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule - * are assigned to Pools. - * - * Possible values: jobschedule, job - */ - poolLifetimeOption: string; - /** - * If false, the Batch service deletes the Pool once its lifetime (as determined - * by the poolLifetimeOption setting) expires; that is, when the Job or Job - * Schedule completes. If true, the Batch service does not delete the Pool - * automatically. It is up to the user to delete auto Pools created with this - * option. - */ - keepAlive?: boolean; - /** Specification for creating a new Pool. */ - pool?: PoolSpecification; -} - -/** Specification for creating a new Pool. */ -export interface PoolSpecification { - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ - displayName?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - */ - vmSize: string; - /** - * This property must be specified if the Pool needs to be created with Azure PaaS - * VMs. This property and virtualMachineConfiguration are mutually exclusive and - * one of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). This property cannot be specified if the - * Batch Account was created with its poolAllocationMode property set to - * 'UserSubscription'. - */ - cloudServiceConfiguration?: CloudServiceConfiguration; - /** - * This property must be specified if the Pool needs to be created with Azure IaaS - * VMs. This property and cloudServiceConfiguration are mutually exclusive and one - * of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). - */ - virtualMachineConfiguration?: VirtualMachineConfiguration; - /** - * The default value is 1. The maximum value is the smaller of 4 times the number - * of cores of the vmSize of the pool or 256. - */ - taskSlotsPerNode?: number; - /** If not specified, the default is spread. */ - taskSchedulingPolicy?: TaskSchedulingPolicy; - /** - * This timeout applies only to manual scaling; it has no effect when - * enableAutoScale is set to true. The default value is 15 minutes. The minimum - * value is 5 minutes. If you specify a value less than 5 minutes, the Batch - * service rejects the request with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). - */ - resizeTimeout?: string; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. - */ - targetDedicatedNodes?: number; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. - */ - targetLowPriorityNodes?: number; - /** - * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must - * be specified. If true, the autoScaleFormula element is required. The Pool - * automatically resizes according to the formula. 
The default value is false. - */ - enableAutoScale?: boolean; - /** - * This property must not be specified if enableAutoScale is set to false. It is - * required if enableAutoScale is set to true. The formula is checked for validity - * before the Pool is created. If the formula is not valid, the Batch service - * rejects the request with detailed error information. - */ - autoScaleFormula?: string; - /** - * The default value is 15 minutes. The minimum and maximum value are 5 minutes - * and 168 hours respectively. If you specify a value less than 5 minutes or - * greater than 168 hours, the Batch service rejects the request with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). - */ - autoScaleEvaluationInterval?: string; - /** - * Enabling inter-node communication limits the maximum size of the Pool due to - * deployment restrictions on the Compute Nodes of the Pool. This may result in - * the Pool not reaching its desired size. The default value is false. - */ - enableInterNodeCommunication?: boolean; - /** The network configuration for a Pool. */ - networkConfiguration?: NetworkConfiguration; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ - startTask?: StartTask; - /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. - */ - certificateReferences?: Array; - /** - * When creating a pool, the package's application ID must be fully qualified - * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - * Changes to Package references affect all new Nodes joining the Pool, but do not - * affect Compute Nodes that are already in the Pool until they are rebooted or - * reimaged. There is a maximum of 10 Package references on any given Pool. - */ - applicationPackageReferences?: Array; - /** - * The list of application licenses must be a subset of available Batch service - * application licenses. If a license is requested which is not supported, Pool - * creation will fail. 
The permitted licenses available on the Pool are 'maya', - * 'vray', '3dsmax', 'arnold'. An additional charge applies for each application - * license added to the Pool. - */ - applicationLicenses?: string[]; - /** The list of user Accounts to be created on each Compute Node in the Pool. */ - userAccounts?: Array; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ - metadata?: Array; - /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ - mountConfiguration?: Array; - /** - * If omitted, the default value is Default. - * - * Possible values: default, classic, simplified - */ - targetNodeCommunicationMode?: string; -} - -/** The network configuration for the Job. */ -export interface JobNetworkConfiguration { - /** - * The virtual network must be in the same region and subscription as the Azure - * Batch Account. The specified subnet should have enough free IP addresses to - * accommodate the number of Compute Nodes which will run Tasks from the Job. This - * can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' - * service principal must have the 'Classic Virtual Machine Contributor' - * Role-Based Access Control (RBAC) role for the specified VNet so that Azure - * Batch service can schedule Tasks on the Nodes. This can be verified by checking - * if the specified VNet has any associated Network Security Groups (NSG). If - * communication to the Nodes in the specified subnet is denied by an NSG, then - * the Batch service will set the state of the Compute Nodes to unusable. This is - * of the form - * /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - * If the specified VNet has any associated Network Security Groups (NSG), then a - * few reserved system ports must be enabled for inbound communication from the - * Azure Batch service. For Pools created with a Virtual Machine configuration, - * enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for - * Windows. Port 443 is also required to be open for outbound connections for - * communications to Azure Storage. For more details see: - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - */ - subnetId: string; -} - -/** Contains information about the execution of a Job in the Azure Batch service. */ -export interface JobExecutionInformation { - /** This is the time at which the Job was created. */ - startTime: Date | string; - /** This property is set only if the Job is in the completed state. */ - endTime?: Date | string; - /** - * This element contains the actual Pool where the Job is assigned. When you get - * Job details from the service, they also contain a poolInfo element, which - * contains the Pool configuration data from when the Job was added or updated. - * That poolInfo element may also contain a poolId element. If it does, the two - * IDs are the same. If it does not, it means the Job ran on an auto Pool, and - * this property contains the ID of that auto Pool. - */ - poolId?: string; - /** This property is not set if there was no error starting the Job. */ - schedulingError?: JobSchedulingError; - /** - * This property is set only if the Job is in the completed state. If the Batch - * service terminates the Job, it sets the reason as follows: JMComplete - the Job - * Manager Task completed, and killJobOnCompletion was set to true. 
- * MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. - * TerminateJobSchedule - the Job ran as part of a schedule, and the schedule - * terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to - * terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's - * onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in - * the Job failed with an exit condition that specified a jobAction of - * terminatejob. Any other string is a user-defined reason specified in a call to - * the 'Terminate a Job' operation. - */ - terminateReason?: string; -} - -/** An error encountered by the Batch service when scheduling a Job. */ -export interface JobSchedulingError { - /** - * The category of the error. - * - * Possible values: usererror, servererror - */ - category: string; - /** - * An identifier for the Job scheduling error. Codes are invariant and are - * intended to be consumed programmatically. - */ - code?: string; - /** - * A message describing the Job scheduling error, intended to be suitable for - * display in a user interface. - */ - message?: string; - /** A list of additional error details related to the scheduling error. */ - details?: Array; -} - -/** Options when disabling a Job. */ -export interface BatchJobDisableParameters { - /** - * What to do with active Tasks associated with the Job. - * - * Possible values: requeue, terminate, wait - */ - disableTasks: string; -} - -/** Options when terminating a Job. */ -export interface BatchJobTerminateParameters { - /** - * The text you want to appear as the Job's TerminateReason. The default is - * 'UserTerminate'. - */ - terminateReason?: string; -} - -/** Contains information about the container which a Task is executing. */ -export interface TaskContainerExecutionInformation { - /** The ID of the container. */ - containerId?: string; - /** - * This is the state of the container according to the Docker service. It is - * equivalent to the status field returned by "docker inspect". - */ - state?: string; - /** - * This is the detailed error string from the Docker service, if available. It is - * equivalent to the error field returned by "docker inspect". - */ - error?: string; -} - -/** Information about a Task failure. */ -export interface TaskFailureInformation { - /** - * The category of the error. - * - * Possible values: usererror, servererror - */ - category: string; - /** - * An identifier for the Task error. Codes are invariant and are intended to be - * consumed programmatically. - */ - code?: string; - /** - * A message describing the Task error, intended to be suitable for display in a - * user interface. - */ - message?: string; - /** A list of additional details related to the error. */ - details?: Array; -} - -/** - * A Certificate that can be installed on Compute Nodes and can be used to - * authenticate operations on the machine. - */ -export interface Certificate { - /** - * The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex - * digits. - */ - thumbprint?: string; - /** The algorithm used to derive the thumbprint. */ - thumbprintAlgorithm?: string; - /** The base64-encoded contents of the Certificate. The maximum size is 10KB. */ - data?: string; - /** - * The format of the Certificate data. - * - * Possible values: pfx, cer - */ - certificateFormat?: string; - /** This must be omitted if the Certificate format is cer. 
*/ - password?: string; -} - -/** An error encountered by the Batch service when deleting a Certificate. */ -export interface DeleteCertificateError { - /** - * An identifier for the Certificate deletion error. Codes are invariant and are - * intended to be consumed programmatically. - */ - code?: string; - /** - * A message describing the Certificate deletion error, intended to be suitable - * for display in a user interface. - */ - message?: string; - /** - * This list includes details such as the active Pools and Compute Nodes - * referencing this Certificate. However, if a large number of resources reference - * the Certificate, the list contains only about the first hundred. - */ - values?: Array; -} - -/** - * A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a - * specification used to create each Job. - */ -export interface BatchJobSchedule { - /** A string that uniquely identifies the schedule within the Account. */ - id?: string; - /** The display name for the schedule. */ - displayName?: string; - /** - * All times are fixed respective to UTC and are not impacted by daylight saving - * time. - */ - schedule?: Schedule; - /** Specifies details of the Jobs to be created on a schedule. */ - jobSpecification?: JobSpecification; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ - metadata?: Array; -} - -/** - * The schedule according to which Jobs will be created. All times are fixed - * respective to UTC and are not impacted by daylight saving time. - */ -export interface Schedule { - /** - * If you do not specify a doNotRunUntil time, the schedule becomes ready to - * create Jobs immediately. - */ - doNotRunUntil?: Date | string; - /** - * If you do not specify a doNotRunAfter time, and you are creating a recurring - * Job Schedule, the Job Schedule will remain active until you explicitly - * terminate it. - */ - doNotRunAfter?: Date | string; - /** - * If a Job is not created within the startWindow interval, then the 'opportunity' - * is lost; no Job will be created until the next recurrence of the schedule. If - * the schedule is recurring, and the startWindow is longer than the recurrence - * interval, then this is equivalent to an infinite startWindow, because the Job - * that is 'due' in one recurrenceInterval is not carried forward into the next - * recurrence interval. The default is infinite. The minimum value is 1 minute. If - * you specify a lower value, the Batch service rejects the schedule with an - * error; if you are calling the REST API directly, the HTTP status code is 400 - * (Bad Request). - */ - startWindow?: string; - /** - * Because a Job Schedule can have at most one active Job under it at any given - * time, if it is time to create a new Job under a Job Schedule, but the previous - * Job is still running, the Batch service will not create the new Job until the - * previous Job finishes. If the previous Job does not finish within the - * startWindow period of the new recurrenceInterval, then no new Job will be - * scheduled for that interval. For recurring Jobs, you should normally specify a - * jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you - * will need an external process to monitor when Jobs are created, add Tasks to - * the Jobs and terminate the Jobs ready for the next recurrence. 
The default is - * that the schedule does not recur: one Job is created, within the startWindow - * after the doNotRunUntil time, and the schedule is complete as soon as that Job - * finishes. The minimum value is 1 minute. If you specify a lower value, the - * Batch service rejects the schedule with an error; if you are calling the REST - * API directly, the HTTP status code is 400 (Bad Request). - */ - recurrenceInterval?: string; -} - -/** Specifies details of the Jobs to be created on a schedule. */ -export interface JobSpecification { - /** - * Priority values can range from -1000 to 1000, with -1000 being the lowest - * priority and 1000 being the highest priority. The default value is 0. This - * priority is used as the default for all Jobs under the Job Schedule. You can - * update a Job's priority after it has been created using by using the update Job - * API. - */ - priority?: number; - /** - * If the value is set to True, other high priority jobs submitted to the system - * will take precedence and will be able requeue tasks from this job. You can - * update a job's allowTaskPreemption after it has been created using the update - * job API. - */ - allowTaskPreemption?: boolean; - /** - * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not - * specified, the default value is -1, which means there's no limit to the number - * of tasks that can be run at once. You can update a job's maxParallelTasks after - * it has been created using the update job API. - */ - maxParallelTasks?: number; - /** - * The name need not be unique and can contain any Unicode characters up to a - * maximum length of 1024. - */ - displayName?: string; - /** - * Whether Tasks in the Job can define dependencies on each other. The default is - * false. - */ - usesTaskDependencies?: boolean; - /** - * Note that if a Job contains no Tasks, then all Tasks are considered complete. - * This option is therefore most commonly used with a Job Manager task; if you - * want to use automatic Job termination without a Job Manager, you should - * initially set onAllTasksComplete to noaction and update the Job properties to - * set onAllTasksComplete to terminatejob once you have finished adding Tasks. The - * default is noaction. - * - * Possible values: noaction, terminatejob - */ - onAllTasksComplete?: string; - /** - * The default is noaction. - * - * Possible values: noaction, performexitoptionsjobaction - */ - onTaskFailure?: string; - /** The network configuration for the Job. */ - networkConfiguration?: JobNetworkConfiguration; - /** The execution constraints for a Job. */ - constraints?: JobConstraints; - /** - * If the Job does not specify a Job Manager Task, the user must explicitly add - * Tasks to the Job using the Task API. If the Job does specify a Job Manager - * Task, the Batch service creates the Job Manager Task when the Job is created, - * and will try to schedule the Job Manager Task before scheduling other Tasks in - * the Job. - */ - jobManagerTask?: JobManagerTask; - /** - * If a Job has a Job Preparation Task, the Batch service will run the Job - * Preparation Task on a Node before starting any Tasks of that Job on that - * Compute Node. - */ - jobPreparationTask?: JobPreparationTask; - /** - * The primary purpose of the Job Release Task is to undo changes to Nodes made by - * the Job Preparation Task. Example activities include deleting local files, or - * shutting down services that were started as part of Job preparation. 
-   * Release Task cannot be specified without also specifying a Job Preparation Task
-   * for the Job. The Batch service runs the Job Release Task on the Compute Nodes
-   * that have run the Job Preparation Task.
-   */
-  jobReleaseTask?: JobReleaseTask;
-  /**
-   * Individual Tasks can override an environment setting specified here by
-   * specifying the same setting name with a different value.
-   */
-  commonEnvironmentSettings?: Array;
-  /** Specifies how a Job should be assigned to a Pool. */
-  poolInfo: PoolInformation;
-  /**
-   * The Batch service does not assign any meaning to metadata; it is solely for the
-   * use of user code.
-   */
-  metadata?: Array;
-}
-
-/**
- * Contains information about Jobs that have been and will be run under a Job
- * Schedule.
- */
-export interface JobScheduleExecutionInformation {
-  /**
-   * This property is meaningful only if the schedule is in the active state when
-   * the time comes around. For example, if the schedule is disabled, no Job will be
-   * created at nextRunTime unless the Job is enabled before then.
-   */
-  nextRunTime?: Date | string;
-  /**
-   * This property is present only if at least one Job has run under the
-   * schedule.
-   */
-  recentJob?: RecentJob;
-  /** This property is set only if the Job Schedule is in the completed state. */
-  endTime?: Date | string;
-}
-
-/** Information about the most recent Job to run under the Job Schedule. */
-export interface RecentJob {
-  /** The ID of the Job. */
-  id?: string;
-  /** The URL of the Job. */
-  url?: string;
-}
-
-/** Resource usage statistics for a Job Schedule. */
-export interface JobScheduleStatistics {
-  /** The URL of the statistics. */
-  url: string;
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date | string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: Date | string;
-  /**
-   * The total user mode CPU time (summed across all cores and all Compute Nodes)
-   * consumed by all Tasks in all Jobs created under the schedule.
-   */
-  userCPUTime: string;
-  /**
-   * The total kernel mode CPU time (summed across all cores and all Compute Nodes)
-   * consumed by all Tasks in all Jobs created under the schedule.
-   */
-  kernelCPUTime: string;
-  /**
-   * The wall clock time is the elapsed time from when the Task started running on a
-   * Compute Node to when it finished (or to the last time the statistics were
-   * updated, if the Task had not finished by then). If a Task was retried, this
-   * includes the wall clock time of all the Task retries.
-   */
-  wallClockTime: string;
-  /**
-   * The total number of disk read operations made by all Tasks in all Jobs created
-   * under the schedule.
-   */
-  readIOps: number;
-  /**
-   * The total number of disk write operations made by all Tasks in all Jobs created
-   * under the schedule.
-   */
-  writeIOps: number;
-  /**
-   * The total gibibytes read from disk by all Tasks in all Jobs created under the
-   * schedule.
-   */
-  readIOGiB: number;
-  /**
-   * The total gibibytes written to disk by all Tasks in all Jobs created under the
-   * schedule.
-   */
-  writeIOGiB: number;
-  /**
-   * The total number of Tasks successfully completed during the given time range in
-   * Jobs created under the schedule. A Task completes successfully if it returns
-   * exit code 0.
-   */
-  numSucceededTasks: number;
-  /**
-   * The total number of Tasks that failed during the given time range in Jobs
-   * created under the schedule. A Task fails if it exhausts its maximum retry count
-   * without returning exit code 0.
-   */
-  numFailedTasks: number;
-  /**
-   * The total number of retries during the given time range on all Tasks in all
-   * Jobs created under the schedule.
-   */
-  numTaskRetries: number;
-  /**
-   * This value is only reported in the Account lifetime statistics; it is not
-   * included in the Job statistics.
-   */
-  waitTime: string;
-}
-
-/**
- * Batch will retry Tasks when a recovery operation is triggered on a Node.
- * Examples of recovery operations include (but are not limited to) when an
- * unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
- * Retries due to recovery operations are independent of and are not counted
- * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
- * retry due to a recovery operation may occur. Because of this, all Tasks should
- * be idempotent. This means Tasks need to tolerate being interrupted and
- * restarted without causing any corruption or duplicate data. The best practice
- * for long running Tasks is to use some form of checkpointing.
- */
-export interface BatchTask {
-  /**
-   * The ID can contain any combination of alphanumeric characters including hyphens
-   * and underscores, and cannot contain more than 64 characters.
-   */
-  id?: string;
-  /**
-   * The display name need not be unique and can contain any Unicode characters up
-   * to a maximum length of 1024.
-   */
-  displayName?: string;
-  /** How the Batch service should respond when the Task completes. */
-  exitConditions?: ExitConditions;
-  /**
-   * For multi-instance Tasks, the command line is executed as the primary Task,
-   * after the primary Task and all subtasks have finished executing the
-   * coordination command line. The command line does not run under a shell, and
-   * therefore cannot take advantage of shell features such as environment variable
-   * expansion. If you want to take advantage of such features, you should invoke
-   * the shell in the command line, for example using "cmd /c MyCommand" in
-   * Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to
-   * file paths, it should use a relative path (relative to the Task working
-   * directory), or use the Batch provided environment variable
-   * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
-   */
-  commandLine?: string;
-  /**
-   * If the Pool that will run this Task has containerConfiguration set, this must
-   * be set as well. If the Pool that will run this Task doesn't have
-   * containerConfiguration set, this must not be set. When this is specified, all
-   * directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure
-   * Batch directories on the node) are mapped into the container, all Task
-   * environment variables are mapped into the container, and the Task command line
-   * is executed in the container. Files produced in the container outside of
-   * AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that
-   * Batch file APIs will not be able to access those files.
-   */
-  containerSettings?: TaskContainerSettings;
-  /**
-   * For multi-instance Tasks, the resource files will only be downloaded to the
-   * Compute Node on which the primary Task is executed. There is a maximum size for
-   * the list of resource files. When the max size is exceeded, the request will
-   * fail and the response error code will be RequestEntityTooLarge. If this occurs,
-   * the collection of ResourceFiles must be reduced in size. This can be achieved
-   * using .zip files, Application Packages, or Docker Containers.
-   */
-  resourceFiles?: Array;
-  /**
-   * For multi-instance Tasks, the files will only be uploaded from the Compute Node
-   * on which the primary Task is executed.
-   */
-  outputFiles?: Array;
-  /** A list of environment variable settings for the Task. */
-  environmentSettings?: Array;
-  /**
-   * A locality hint that can be used by the Batch service to select a Compute Node
-   * on which to start a Task.
-   */
-  affinityInfo?: AffinityInformation;
-  /** Execution constraints to apply to a Task. */
-  constraints?: TaskConstraints;
-  /**
-   * The default is 1. A Task can only be scheduled to run on a compute node if the
-   * node has enough free scheduling slots available. For multi-instance Tasks, this
-   * must be 1.
-   */
-  requiredSlots?: number;
-  /** If omitted, the Task runs as a non-administrative user unique to the Task. */
-  userIdentity?: UserIdentity;
-  /**
-   * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case,
-   * if any of the subtasks fail (for example due to exiting with a non-zero exit
-   * code) the entire multi-instance Task fails. The multi-instance Task is then
-   * terminated and retried, up to its retry limit.
-   */
-  multiInstanceSettings?: MultiInstanceSettings;
-  /**
-   * This Task will not be scheduled until all Tasks that it depends on have
-   * completed successfully. If any of those Tasks fail and exhaust their retry
-   * counts, this Task will never be scheduled.
-   */
-  dependsOn?: TaskDependencies;
-  /**
-   * Application packages are downloaded and deployed to a shared directory, not the
-   * Task working directory. Therefore, if a referenced package is already on the
-   * Node, and is up to date, then it is not re-downloaded; the existing copy on the
-   * Compute Node is used. If a referenced Package cannot be installed, for example
-   * because the package has been deleted or because download failed, the Task
-   * fails.
-   */
-  applicationPackageReferences?: Array;
-  /**
-   * If this property is set, the Batch service provides the Task with an
-   * authentication token which can be used to authenticate Batch service operations
-   * without requiring an Account access key. The token is provided via the
-   * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the
-   * Task can carry out using the token depend on the settings. For example, a Task
-   * can request Job permissions in order to add other Tasks to the Job, or check
-   * the status of the Job or of other Tasks under the Job.
-   */
-  authenticationTokenSettings?: AuthenticationTokenSettings;
-}
-
-/** Specifies how the Batch service should respond when the Task completes. */
-export interface ExitConditions {
-  /**
-   * A list of individual Task exit codes and how the Batch service should respond
-   * to them.
-   */
-  exitCodes?: Array;
-  /**
-   * A list of Task exit code ranges and how the Batch service should respond to
-   * them.
-   */
-  exitCodeRanges?: Array;
-  /** Specifies how the Batch service responds to a particular exit condition. */
-  preProcessingError?: ExitOptions;
-  /**
-   * If the Task exited with an exit code that was specified via exitCodes or
-   * exitCodeRanges, and then encountered a file upload error, then the action
-   * specified by the exit code takes precedence.
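-   *
-   * Illustrative sketch (editor's example, not part of the generated source): a
-   * Task that should terminate the Job on exit code 1 but leave dependent Tasks
-   * blocked on a file upload error could be configured as
-   *
-   *   const conditions: ExitConditions = {
-   *     exitCodes: [{ code: 1, exitOptions: { jobAction: "terminate" } }],
-   *     fileUploadError: { dependencyAction: "block" },
-   *     default: { jobAction: "none" }
-   *   };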
-   */
-  fileUploadError?: ExitOptions;
-  /**
-   * This value is used if the Task exits with any nonzero exit code not listed in
-   * the exitCodes or exitCodeRanges collection, with a pre-processing error if the
-   * preProcessingError property is not present, or with a file upload error if the
-   * fileUploadError property is not present. If you want non-default behavior on
-   * exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges
-   * collection.
-   */
-  default?: ExitOptions;
-}
-
-/**
- * How the Batch service should respond if a Task exits with a particular exit
- * code.
- */
-export interface ExitCodeMapping {
-  /** A process exit code. */
-  code: number;
-  /** Specifies how the Batch service responds to a particular exit condition. */
-  exitOptions: ExitOptions;
-}
-
-/** Specifies how the Batch service responds to a particular exit condition. */
-export interface ExitOptions {
-  /**
-   * The default is none for exit code 0 and terminate for all other exit
-   * conditions. If the Job's onTaskFailed property is noaction, then specifying
-   * this property returns an error and the add Task request fails with an invalid
-   * property value error; if you are calling the REST API directly, the HTTP status
-   * code is 400 (Bad Request).
-   *
-   * Possible values: none, disable, terminate
-   */
-  jobAction?: string;
-  /**
-   * Possible values are 'satisfy' (allowing dependent tasks to progress) and
-   * 'block' (dependent tasks continue to wait). Batch does not yet support
-   * cancellation of dependent tasks.
-   *
-   * Possible values: satisfy, block
-   */
-  dependencyAction?: string;
-}
-
-/**
- * A range of exit codes and how the Batch service should respond to exit codes
- * within that range.
- */
-export interface ExitCodeRangeMapping {
-  /** The first exit code in the range. */
-  start: number;
-  /** The last exit code in the range. */
-  end: number;
-  /** Specifies how the Batch service responds to a particular exit condition. */
-  exitOptions: ExitOptions;
-}
-
-/**
- * A locality hint that can be used by the Batch service to select a Compute Node
- * on which to start a Task.
- */
-export interface AffinityInformation {
-  /**
-   * You can pass the affinityId of a Node to indicate that this Task needs to run
-   * on that Compute Node. Note that this is just a soft affinity. If the target
-   * Compute Node is busy or unavailable at the time the Task is scheduled, then the
-   * Task will be scheduled elsewhere.
-   */
-  affinityId: string;
-}
-
-/** Information about the execution of a Task. */
-export interface TaskExecutionInformation {
-  /**
-   * 'Running' corresponds to the running state, so if the Task specifies resource
-   * files or Packages, then the start time reflects the time at which the Task
-   * started downloading or deploying these. If the Task has been restarted or
-   * retried, this is the most recent time at which the Task started running. This
-   * property is present only for Tasks that are in the running or completed state.
-   */
-  startTime?: Date | string;
-  /** This property is set only if the Task is in the Completed state. */
-  endTime?: Date | string;
-  /**
-   * This property is set only if the Task is in the completed state. In general,
-   * the exit code for a process reflects the specific convention implemented by the
-   * application developer for that process. If you use the exit code value to make
-   * decisions in your code, be sure that you know the exit code convention used by
-   * the application process. However, if the Batch service terminates the Task (due
-   * to timeout, or user termination via the API) you may see an operating
-   * system-defined exit code.
-   */
-  exitCode?: number;
-  /** This property is set only if the Task runs in a container context. */
-  containerInfo?: TaskContainerExecutionInformation;
-  /**
-   * This property is set only if the Task is in the completed state and encountered
-   * a failure.
-   */
-  failureInfo?: TaskFailureInformation;
-  /**
-   * Task application failures (non-zero exit code) are retried, pre-processing
-   * errors (the Task could not be run) and file upload errors are not retried. The
-   * Batch service will retry the Task up to the limit specified by the constraints.
-   */
-  retryCount: number;
-  /**
-   * This element is present only if the Task was retried (i.e. retryCount is
-   * nonzero). If present, this is typically the same as startTime, but may be
-   * different if the Task has been restarted for reasons other than retry; for
-   * example, if the Compute Node was rebooted during a retry, then the startTime is
-   * updated but the lastRetryTime is not.
-   */
-  lastRetryTime?: Date | string;
-  /**
-   * When the user removes Compute Nodes from a Pool (by resizing/shrinking the
-   * pool) or when the Job is being disabled, the user can specify that running
-   * Tasks on the Compute Nodes be requeued for execution. This count tracks how
-   * many times the Task has been requeued for these reasons.
-   */
-  requeueCount: number;
-  /** This property is set only if the requeueCount is nonzero. */
-  lastRequeueTime?: Date | string;
-  /**
-   * If the value is 'failed', then the details of the failure can be found in the
-   * failureInfo property.
-   *
-   * Possible values: success, failure
-   */
-  result?: string;
-}
-
-/** Information about the Compute Node on which a Task ran. */
-export interface ComputeNodeInformation {
-  /**
-   * An identifier for the Node on which the Task ran, which can be passed when
-   * adding a Task to request that the Task be scheduled on this Compute Node.
-   */
-  affinityId?: string;
-  /** The URL of the Compute Node on which the Task ran. */
-  nodeUrl?: string;
-  /** The ID of the Pool on which the Task ran. */
-  poolId?: string;
-  /** The ID of the Compute Node on which the Task ran. */
-  nodeId?: string;
-  /** The root directory of the Task on the Compute Node. */
-  taskRootDirectory?: string;
-  /** The URL to the root directory of the Task on the Compute Node. */
-  taskRootDirectoryUrl?: string;
-}
-
-/**
- * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case,
- * if any of the subtasks fail (for example due to exiting with a non-zero exit
- * code) the entire multi-instance Task fails. The multi-instance Task is then
- * terminated and retried, up to its retry limit.
- */
-export interface MultiInstanceSettings {
-  /** If omitted, the default is 1. */
-  numberOfInstances?: number;
-  /**
-   * A typical coordination command line launches a background service and verifies
-   * that the service is ready to process inter-node messages.
-   */
-  coordinationCommandLine: string;
-  /**
-   * The difference between common resource files and Task resource files is that
-   * common resource files are downloaded for all subtasks including the primary,
-   * whereas Task resource files are downloaded only for the primary. Also note that
-   * these resource files are not downloaded to the Task working directory, but
-   * instead are downloaded to the Task root directory (one directory above the
-   * working directory). There is a maximum size for the list of resource files.
-   * When the max size is exceeded, the request will fail and the response error
-   * code will be RequestEntityTooLarge. If this occurs, the collection of
-   * ResourceFiles must be reduced in size. This can be achieved using .zip files,
-   * Application Packages, or Docker Containers.
-   */
-  commonResourceFiles?: Array;
-}
-
-/** Resource usage statistics for a Task. */
-export interface TaskStatistics {
-  /** The URL of the statistics. */
-  url: string;
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date | string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: Date | string;
-  /**
-   * The total user mode CPU time (summed across all cores and all Compute Nodes)
-   * consumed by the Task.
-   */
-  userCPUTime: string;
-  /**
-   * The total kernel mode CPU time (summed across all cores and all Compute Nodes)
-   * consumed by the Task.
-   */
-  kernelCPUTime: string;
-  /**
-   * The wall clock time is the elapsed time from when the Task started running on a
-   * Compute Node to when it finished (or to the last time the statistics were
-   * updated, if the Task had not finished by then). If the Task was retried, this
-   * includes the wall clock time of all the Task retries.
-   */
-  wallClockTime: string;
-  /** The total number of disk read operations made by the Task. */
-  readIOps: number;
-  /** The total number of disk write operations made by the Task. */
-  writeIOps: number;
-  /** The total gibibytes read from disk by the Task. */
-  readIOGiB: number;
-  /** The total gibibytes written to disk by the Task. */
-  writeIOGiB: number;
-  /**
-   * The total wait time of the Task. The wait time for a Task is defined as the
-   * elapsed time between the creation of the Task and the start of Task execution.
-   * (If the Task is retried due to failures, the wait time is the time to the most
-   * recent Task execution.)
-   */
-  waitTime: string;
-}
-
-/**
- * Specifies any dependencies of a Task. Any Task that is explicitly specified or
- * within a dependency range must complete before the dependent Task will be
- * scheduled.
- */
-export interface TaskDependencies {
-  /**
-   * The taskIds collection is limited to 64000 characters total (i.e. the combined
-   * length of all Task IDs). If the taskIds collection exceeds the maximum length,
-   * the Add Task request fails with error code TaskDependencyListTooLong. In this
-   * case consider using Task ID ranges instead.
-   */
-  taskIds?: string[];
-  /**
-   * The list of Task ID ranges that this Task depends on. All Tasks in all ranges
-   * must complete successfully before the dependent Task can be scheduled.
-   */
-  taskIdRanges?: Array;
-}
-
-/**
- * The start and end of the range are inclusive. For example, if a range has start
- * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'.
- */
-export interface TaskIdRange {
-  /** The first Task ID in the range. */
-  start: number;
-  /** The last Task ID in the range. */
-  end: number;
-}
-
-/** A collection of Azure Batch Tasks to add. */
-export interface BatchTaskCollection {
-  /**
-   * The total serialized size of this collection must be less than 1MB. If it is
-   * greater than 1MB (for example if each Task has 100's of resource files or
-   * environment variables), the request will fail with code 'RequestBodyTooLarge'
-   * and should be retried with fewer Tasks.
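-   *
-   * A minimal sketch of such chunking (editor's illustration; `tasks` and the
-   * chunk size of 100 are assumptions, not part of the source):
-   *
-   *   const chunks: BatchTaskCollection[] = [];
-   *   for (let i = 0; i < tasks.length; i += 100) {
-   *     chunks.push({ value: tasks.slice(i, i + 100) });
-   *   }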
-   */
-  value: Array;
-}
-
-/** A user Account for RDP or SSH access on a Compute Node. */
-export interface ComputeNodeUser {
-  /** The user name of the Account. */
-  name: string;
-  /** The default value is false. */
-  isAdmin?: boolean;
-  /**
-   * If omitted, the default is 1 day from the current time. For Linux Compute
-   * Nodes, the expiryTime has a precision up to a day.
-   */
-  expiryTime?: Date | string;
-  /**
-   * The password is required for Windows Compute Nodes (those created with
-   * 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration'
-   * using a Windows Image reference). For Linux Compute Nodes, the password can
-   * optionally be specified along with the sshPublicKey property.
-   */
-  password?: string;
-  /**
-   * The public key should be compatible with OpenSSH encoding and should be base 64
-   * encoded. This property can be specified only for Linux Compute Nodes. If this
-   * is specified for a Windows Compute Node, then the Batch service rejects the
-   * request; if you are calling the REST API directly, the HTTP status code is 400
-   * (Bad Request).
-   */
-  sshPublicKey?: string;
-}
-
-/** The set of changes to be made to a user Account on a Compute Node. */
-export interface NodeUpdateUserParameters {
-  /**
-   * The password is required for Windows Compute Nodes (those created with
-   * 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration'
-   * using a Windows Image reference). For Linux Compute Nodes, the password can
-   * optionally be specified along with the sshPublicKey property. If omitted, any
-   * existing password is removed.
-   */
-  password?: string;
-  /**
-   * If omitted, the default is 1 day from the current time. For Linux Compute
-   * Nodes, the expiryTime has a precision up to a day.
-   */
-  expiryTime?: Date | string;
-  /**
-   * The public key should be compatible with OpenSSH encoding and should be base 64
-   * encoded. This property can be specified only for Linux Compute Nodes. If this
-   * is specified for a Windows Compute Node, then the Batch service rejects the
-   * request; if you are calling the REST API directly, the HTTP status code is 400
-   * (Bad Request). If omitted, any existing SSH public key is removed.
-   */
-  sshPublicKey?: string;
-}
-
-/** Options for rebooting a Compute Node. */
-export interface NodeRebootParameters {
-  /**
-   * The default value is requeue.
-   *
-   * Possible values: requeue, terminate, taskcompletion, retaineddata
-   */
-  nodeRebootOption?: string;
-}
-
-/** Options for reimaging a Compute Node. */
-export interface NodeReimageParameters {
-  /**
-   * The default value is requeue.
-   *
-   * Possible values: requeue, terminate, taskcompletion, retaineddata
-   */
-  nodeReimageOption?: string;
-}
-
-/** Options for disabling scheduling on a Compute Node. */
-export interface NodeDisableSchedulingParameters {
-  /**
-   * The default value is requeue.
-   *
-   * Possible values: requeue, terminate, taskcompletion
-   */
-  nodeDisableSchedulingOption?: string;
-}
-
-/** The Azure Batch service log files upload configuration for a Compute Node. */
-export interface UploadBatchServiceLogsConfiguration {
-  /**
-   * If a user assigned managed identity is not being used, the URL must include a
-   * Shared Access Signature (SAS) granting write permissions to the container. The
-   * SAS duration must allow enough time for the upload to finish. The start time
-   * for SAS is optional, and it is recommended not to specify it.
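-   *
-   * Illustrative sketch (editor's example; the account, container, and SAS query
-   * string are hypothetical):
-   *
-   *   const logUpload: UploadBatchServiceLogsConfiguration = {
-   *     containerUrl: "https://myaccount.blob.core.windows.net/logs?sv=...&sig=...",
-   *     startTime: new Date(Date.now() - 60 * 60 * 1000)
-   *   };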
-   */
-  containerUrl: string;
-  /**
-   * Any log file containing a log message in the time range will be uploaded. This
-   * means that the operation might retrieve more logs than have been requested
-   * since the entire log file is always uploaded, but the operation should not
-   * retrieve fewer logs than have been requested.
-   */
-  startTime: Date | string;
-  /**
-   * Any log file containing a log message in the time range will be uploaded. This
-   * means that the operation might retrieve more logs than have been requested
-   * since the entire log file is always uploaded, but the operation should not
-   * retrieve fewer logs than have been requested. If omitted, the default is to
-   * upload all logs available after the startTime.
-   */
-  endTime?: Date | string;
-  /** The identity must have write access to the Azure Blob Storage container. */
-  identityReference?: object;
-}
diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/outputModels.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/outputModels.ts
deleted file mode 100644
index e81ab815a4..0000000000
--- a/packages/typespec-test/test/batch/generated/typespec-ts/src/outputModels.ts
+++ /dev/null
@@ -1,3872 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-import { Paged } from "@azure/core-paging";
-
-/** The result of listing the applications available in an Account. */
-export interface ApplicationListResultOutput {
-  /** The list of applications available in the Account. */
-  value?: Array;
-  /** The URL to get the next set of results. */
-  "odata.nextLink"?: string;
-}
-
-/** Contains information about an application in an Azure Batch Account. */
-export interface ApplicationOutput {
-  /** A string that uniquely identifies the application within the Account. */
-  readonly id: string;
-  /** The display name for the application. */
-  displayName: string;
-  /** The list of available versions of the application. */
-  versions: string[];
-}
-
-/** Usage metrics for a Pool across an aggregation interval. */
-export interface PoolUsageMetricsOutput {
-  /** The ID of the Pool whose metrics are aggregated in this entry. */
-  readonly poolId: string;
-  /** The start time of the aggregation interval covered by this entry. */
-  startTime: string;
-  /** The end time of the aggregation interval covered by this entry. */
-  endTime: string;
-  /**
-   * For information about available sizes of virtual machines in Pools, see Choose
-   * a VM size for Compute Nodes in an Azure Batch Pool
-   * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
-   */
-  vmSize: string;
-  /** The total core hours used in the Pool during this aggregation interval. */
-  totalCoreHours: number;
-}
-
-/** Contains utilization and resource usage statistics for the lifetime of a Pool. */
-export interface PoolStatisticsOutput {
-  /** The URL for the statistics. */
-  readonly url: string;
-  /** The start time of the time range covered by the statistics. */
-  startTime: string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: string;
-  /** Statistics related to Pool usage information. */
-  usageStats?: UsageStatisticsOutput;
-  /** Statistics related to resource consumption by Compute Nodes in a Pool. */
-  resourceStats?: ResourceStatisticsOutput;
-}
-
-/** Statistics related to Pool usage information. */
-export interface UsageStatisticsOutput {
-  /** The start time of the time range covered by the statistics. */
-  startTime: string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: string;
-  /**
-   * The aggregated wall-clock time of the dedicated Compute Node cores being part
-   * of the Pool.
-   */
-  dedicatedCoreTime: string;
-}
-
-/** Statistics related to resource consumption by Compute Nodes in a Pool. */
-export interface ResourceStatisticsOutput {
-  /** The start time of the time range covered by the statistics. */
-  startTime: string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: string;
-  /**
-   * The average CPU usage across all Compute Nodes in the Pool (percentage per
-   * node).
-   */
-  avgCPUPercentage: number;
-  /** The average memory usage in GiB across all Compute Nodes in the Pool. */
-  avgMemoryGiB: number;
-  /** The peak memory usage in GiB across all Compute Nodes in the Pool. */
-  peakMemoryGiB: number;
-  /** The average used disk space in GiB across all Compute Nodes in the Pool. */
-  avgDiskGiB: number;
-  /** The peak used disk space in GiB across all Compute Nodes in the Pool. */
-  peakDiskGiB: number;
-  /** The total number of disk read operations across all Compute Nodes in the Pool. */
-  diskReadIOps: number;
-  /** The total number of disk write operations across all Compute Nodes in the Pool. */
-  diskWriteIOps: number;
-  /**
-   * The total amount of data in GiB of disk reads across all Compute Nodes in the
-   * Pool.
-   */
-  diskReadGiB: number;
-  /**
-   * The total amount of data in GiB of disk writes across all Compute Nodes in the
-   * Pool.
-   */
-  diskWriteGiB: number;
-  /**
-   * The total amount of data in GiB of network reads across all Compute Nodes in
-   * the Pool.
-   */
-  networkReadGiB: number;
-  /**
-   * The total amount of data in GiB of network writes across all Compute Nodes in
-   * the Pool.
-   */
-  networkWriteGiB: number;
-}
-
-/** A Pool in the Azure Batch service. */
-export interface BatchPoolOutput {
-  /**
-   * The ID can contain any combination of alphanumeric characters including hyphens
-   * and underscores, and cannot contain more than 64 characters. The ID is
-   * case-preserving and case-insensitive (that is, you may not have two IDs within
-   * an Account that differ only by case).
-   */
-  id?: string;
-  /**
-   * The display name need not be unique and can contain any Unicode characters up
-   * to a maximum length of 1024.
-   */
-  displayName?: string;
-  /** The URL of the Pool. */
-  readonly url?: string;
-  /**
-   * This is an opaque string. You can use it to detect whether the Pool has changed
-   * between requests. In particular, you can pass the ETag when updating a Pool
-   * to specify that your changes should take effect only if nobody else has
-   * modified the Pool in the meantime.
-   */
-  readonly eTag?: string;
-  /**
-   * This is the last time at which the Pool level data, such as the
-   * targetDedicatedNodes or enableAutoscale settings, changed. It does not factor
-   * in node-level changes such as a Compute Node changing state.
-   */
-  readonly lastModified?: string;
-  /** The creation time of the Pool. */
-  readonly creationTime?: string;
-  /**
-   * The current state of the Pool.
-   *
-   * Possible values: active, deleting
-   */
-  readonly state?: string;
-  /** The time at which the Pool entered its current state. */
-  readonly stateTransitionTime?: string;
-  /**
-   * Whether the Pool is resizing.
-   *
-   * Possible values: steady, resizing, stopping
-   */
-  readonly allocationState?: string;
-  /** The time at which the Pool entered its current allocation state. */
-  readonly allocationStateTransitionTime?: string;
-  /**
-   * For information about available sizes of virtual machines in Pools, see Choose
-   * a VM size for Compute Nodes in an Azure Batch Pool
-   * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
-   */
-  vmSize?: string;
-  /**
-   * This property and virtualMachineConfiguration are mutually exclusive and one of
-   * the properties must be specified. This property cannot be specified if the
-   * Batch Account was created with its poolAllocationMode property set to
-   * 'UserSubscription'.
-   */
-  cloudServiceConfiguration?: CloudServiceConfigurationOutput;
-  /**
-   * This property and cloudServiceConfiguration are mutually exclusive and one of
-   * the properties must be specified.
-   */
-  virtualMachineConfiguration?: VirtualMachineConfigurationOutput;
-  /**
-   * This is the timeout for the most recent resize operation. (The initial sizing
-   * when the Pool is created counts as a resize.) The default value is 15 minutes.
-   */
-  resizeTimeout?: string;
-  /**
-   * This property is set only if one or more errors occurred during the last Pool
-   * resize, and only when the Pool allocationState is Steady.
-   */
-  readonly resizeErrors?: Array;
-  /** The number of dedicated Compute Nodes currently in the Pool. */
-  readonly currentDedicatedNodes?: number;
-  /**
-   * Spot/Low-priority Compute Nodes which have been preempted are included in this
-   * count.
-   */
-  readonly currentLowPriorityNodes?: number;
-  /** The desired number of dedicated Compute Nodes in the Pool. */
-  targetDedicatedNodes?: number;
-  /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */
-  targetLowPriorityNodes?: number;
-  /**
-   * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must
-   * be specified. If true, the autoScaleFormula property is required and the Pool
-   * automatically resizes according to the formula. The default value is false.
-   */
-  enableAutoScale?: boolean;
-  /**
-   * This property is set only if the Pool automatically scales, i.e.
-   * enableAutoScale is true.
-   */
-  autoScaleFormula?: string;
-  /**
-   * This property is set only if the Pool automatically scales, i.e.
-   * enableAutoScale is true.
-   */
-  autoScaleEvaluationInterval?: string;
-  /**
-   * This property is set only if the Pool automatically scales, i.e.
-   * enableAutoScale is true.
-   */
-  readonly autoScaleRun?: AutoScaleRunOutput;
-  /**
-   * This imposes restrictions on which Compute Nodes can be assigned to the Pool.
-   * Specifying this value can reduce the chance of the requested number of Compute
-   * Nodes being allocated in the Pool.
-   */
-  enableInterNodeCommunication?: boolean;
-  /** The network configuration for a Pool. */
-  networkConfiguration?: NetworkConfigurationOutput;
-  /**
-   * Batch will retry Tasks when a recovery operation is triggered on a Node.
-   * Examples of recovery operations include (but are not limited to) when an
-   * unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
-   * Retries due to recovery operations are independent of and are not counted
-   * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
-   * retry due to a recovery operation may occur. Because of this, all Tasks should
-   * be idempotent. This means Tasks need to tolerate being interrupted and
-   * restarted without causing any corruption or duplicate data. The best practice
-   * for long running Tasks is to use some form of checkpointing. In some cases the
-   * StartTask may be re-run even though the Compute Node was not rebooted. Special
-   * care should be taken to avoid StartTasks which create breakaway processes or
-   * install/launch services from the StartTask working directory, as this will
-   * block Batch from being able to re-run the StartTask.
-   */
-  startTask?: StartTaskOutput;
-  /**
-   * For Windows Nodes, the Batch service installs the Certificates to the specified
-   * Certificate store and location. For Linux Compute Nodes, the Certificates are
-   * stored in a directory inside the Task working directory and an environment
-   * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
-   * location. For Certificates with visibility of 'remoteUser', a 'certs' directory
-   * is created in the user's home directory (e.g., /home/{user-name}/certs) and
-   * Certificates are placed in that directory.
-   */
-  certificateReferences?: Array;
-  /**
-   * Changes to Package references affect all new Nodes joining the Pool, but do not
-   * affect Compute Nodes that are already in the Pool until they are rebooted or
-   * reimaged. There is a maximum of 10 Package references on any given Pool.
-   */
-  applicationPackageReferences?: Array;
-  /**
-   * The list of application licenses must be a subset of available Batch service
-   * application licenses. If a license is requested which is not supported, Pool
-   * creation will fail.
-   */
-  applicationLicenses?: string[];
-  /**
-   * The default value is 1. The maximum value is the smaller of 4 times the number
-   * of cores of the vmSize of the pool or 256.
-   */
-  taskSlotsPerNode?: number;
-  /** If not specified, the default is spread. */
-  taskSchedulingPolicy?: TaskSchedulingPolicyOutput;
-  /** The list of user Accounts to be created on each Compute Node in the Pool. */
-  userAccounts?: Array;
-  /** A list of name-value pairs associated with the Pool as metadata. */
-  metadata?: Array;
-  /**
-   * This property is populated only if the CloudPool was retrieved with an expand
-   * clause including the 'stats' attribute; otherwise it is null. The statistics
-   * may not be immediately available. The Batch service performs periodic roll-up
-   * of statistics. The typical delay is about 30 minutes.
-   */
-  readonly stats?: PoolStatisticsOutput;
-  /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */
-  mountConfiguration?: Array;
-  /**
-   * The list of user identities associated with the Batch pool. The user identity
-   * dictionary key references will be ARM resource ids in the form:
-   * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
-   */
-  readonly identity?: BatchPoolIdentityOutput;
-  /**
-   * If omitted, the default value is Default.
-   *
-   * Possible values: default, classic, simplified
-   */
-  targetNodeCommunicationMode?: string;
-  /**
-   * Determines how a pool communicates with the Batch service.
-   *
-   * Possible values: default, classic, simplified
-   */
-  readonly currentNodeCommunicationMode?: string;
-}
-
-/**
- * The configuration for Compute Nodes in a Pool based on the Azure Cloud Services
- * platform.
- */
-export interface CloudServiceConfigurationOutput {
-  /**
-   * Possible values are:
-   * 2 - OS Family 2, equivalent to Windows Server 2008 R2
-   * SP1.
-   * 3 - OS Family 3, equivalent to Windows Server 2012.
-   * 4 - OS Family 4,
-   * equivalent to Windows Server 2012 R2.
-   * 5 - OS Family 5, equivalent to Windows
-   * Server 2016.
-   * 6 - OS Family 6, equivalent to Windows Server 2019. For more
-   * information, see Azure Guest OS Releases
-   * (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
-   */
-  osFamily: string;
-  /**
-   * The default value is * which specifies the latest operating system version for
-   * the specified OS family.
-   */
-  osVersion?: string;
-}
-
-/**
- * The configuration for Compute Nodes in a Pool based on the Azure Virtual
- * Machines infrastructure.
- */
-export interface VirtualMachineConfigurationOutput {
-  /**
-   * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image
-   * Gallery Image. To get the list of all Azure Marketplace Image references
-   * verified by Azure Batch, see the 'List Supported Images' operation.
-   */
-  imageReference: ImageReferenceOutput;
-  /**
-   * The Batch Compute Node agent is a program that runs on each Compute Node in the
-   * Pool, and provides the command-and-control interface between the Compute Node
-   * and the Batch service. There are different implementations of the Compute Node
-   * agent, known as SKUs, for different operating systems. You must specify a
-   * Compute Node agent SKU which matches the selected Image reference. To get the
-   * list of supported Compute Node agent SKUs along with their list of verified
-   * Image references, see the 'List supported Compute Node agent SKUs' operation.
-   */
-  nodeAgentSKUId: string;
-  /**
-   * This property must not be specified if the imageReference property specifies a
-   * Linux OS Image.
-   */
-  windowsConfiguration?: WindowsConfigurationOutput;
-  /**
-   * This property must be specified if the Compute Nodes in the Pool need to have
-   * empty data disks attached to them. This cannot be updated. Each Compute Node
-   * gets its own disk (the disk is not a file share). Existing disks cannot be
-   * attached, each attached disk is empty. When the Compute Node is removed from
-   * the Pool, the disk and all data associated with it is also deleted. The disk is
-   * not formatted after being attached, it must be formatted before use - for more
-   * information see
-   * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux
-   * and
-   * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine.
-   */
-  dataDisks?: Array;
-  /**
-   * This only applies to Images that contain the Windows operating system, and
-   * should only be used when you hold valid on-premises licenses for the Compute
-   * Nodes which will be deployed. If omitted, no on-premises licensing discount is
-   * applied. Values are:
-   *
-   * Windows_Server - The on-premises license is for Windows
-   * Server.
-   * Windows_Client - The on-premises license is for Windows Client.
-   *
-   */
-  licenseType?: string;
-  /**
-   * If specified, setup is performed on each Compute Node in the Pool to allow
-   * Tasks to run in containers. All regular Tasks and Job manager Tasks run on this
-   * Pool must specify the containerSettings property, and all other Tasks may
-   * specify it.
-   */
-  containerConfiguration?: ContainerConfigurationOutput;
-  /**
-   * If specified, encryption is performed on each node in the pool during node
-   * provisioning.
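-   *
-   * For illustration (editor's note, derived from the target descriptions
-   * below): a Windows pool must list both targets, e.g.
-   * { targets: ["OsDisk", "TemporaryDisk"] }, while a Linux pool may only
-   * specify { targets: ["TemporaryDisk"] }.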
-   */
-  diskEncryptionConfiguration?: DiskEncryptionConfigurationOutput;
-  /**
-   * This configuration will specify rules on how nodes in the pool will be
-   * physically allocated.
-   */
-  nodePlacementConfiguration?: NodePlacementConfigurationOutput;
-  /**
-   * If specified, the extensions mentioned in this configuration will be installed
-   * on each node.
-   */
-  extensions?: Array;
-  /** Settings for the operating system disk of the compute node (VM). */
-  osDisk?: OSDiskOutput;
-}
-
-/**
- * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image
- * Gallery Image. To get the list of all Azure Marketplace Image references
- * verified by Azure Batch, see the 'List Supported Images' operation.
- */
-export interface ImageReferenceOutput {
-  /** For example, Canonical or MicrosoftWindowsServer. */
-  publisher?: string;
-  /** For example, UbuntuServer or WindowsServer. */
-  offer?: string;
-  /** For example, 18.04-LTS or 2019-Datacenter. */
-  sku?: string;
-  /**
-   * A value of 'latest' can be specified to select the latest version of an Image.
-   * If omitted, the default is 'latest'.
-   */
-  version?: string;
-  /**
-   * This property is mutually exclusive with other ImageReference properties. The
-   * Shared Image Gallery Image must have replicas in the same region and must be in
-   * the same subscription as the Azure Batch account. If the image version is not
-   * specified in the imageId, the latest version will be used. For information
-   * about the firewall settings for the Batch Compute Node agent to communicate
-   * with the Batch service see
-   * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
-   */
-  virtualMachineImageId?: string;
-  /**
-   * The specific version of the platform image or marketplace image used to create
-   * the node. This read-only field differs from 'version' only if the value
-   * specified for 'version' when the pool was created was 'latest'.
-   */
-  readonly exactVersion?: string;
-}
-
-/** Windows operating system settings to apply to the virtual machine. */
-export interface WindowsConfigurationOutput {
-  /** If omitted, the default value is true. */
-  enableAutomaticUpdates?: boolean;
-}
-
-/**
- * Settings which will be used by the data disks associated to Compute Nodes in
- * the Pool. When using attached data disks, you need to mount and format the
- * disks from within a VM to use them.
- */
-export interface DataDiskOutput {
-  /**
-   * The lun is used to uniquely identify each data disk. If attaching multiple
-   * disks, each should have a distinct lun. The value must be between 0 and 63,
-   * inclusive.
-   */
-  lun: number;
-  /**
-   * The default value for caching is readwrite. For information about the caching
-   * options see:
-   * https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.
-   *
-   * Possible values: none, readonly, readwrite
-   */
-  caching?: string;
-  /** The initial disk size in gigabytes. */
-  diskSizeGB: number;
-  /**
-   * If omitted, the default is "standard_lrs".
-   *
-   * Possible values: standard_lrs, premium_lrs
-   */
-  storageAccountType?: string;
-}
-
-/** The configuration for container-enabled Pools. */
-export interface ContainerConfigurationOutput {
-  /**
-   * The container technology to be used.
-   *
-   * Possible values: dockerCompatible
-   */
-  type: string;
-  /**
-   * This is the full Image reference, as would be specified to "docker pull". An
-   * Image will be sourced from the default Docker registry unless the Image is
-   * fully qualified with an alternative registry.
-   */
-  containerImageNames?: string[];
-  /**
-   * If any Images must be downloaded from a private registry which requires
-   * credentials, then those credentials must be provided here.
-   */
-  containerRegistries?: Array;
-}
-
-/** A private container registry. */
-export interface ContainerRegistryOutput {
-  /** The user name to log into the registry server. */
-  username?: string;
-  /** The password to log into the registry server. */
-  password?: string;
-  /** If omitted, the default is "docker.io". */
-  registryServer?: string;
-  /**
-   * The reference to a user assigned identity associated with the Batch pool which
-   * a compute node will use.
-   */
-  identityReference?: ComputeNodeIdentityReferenceOutput;
-}
-
-/**
- * The reference to a user assigned identity associated with the Batch pool which
- * a compute node will use.
- */
-export interface ComputeNodeIdentityReferenceOutput {
-  /** The ARM resource id of the user assigned identity. */
-  resourceId?: string;
-}
-
-/**
- * The disk encryption configuration applied on compute nodes in the pool. Disk
- * encryption configuration is not supported on Linux pool created with Shared
- * Image Gallery Image.
- */
-export interface DiskEncryptionConfigurationOutput {
-  /**
-   * If omitted, no disks on the compute nodes in the pool will be encrypted. On
-   * Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk"
-   * and "TemporaryDisk" must be specified.
-   */
-  targets?: string[];
-}
-
-/**
- * For regional placement, nodes in the pool will be allocated in the same region.
- * For zonal placement, nodes in the pool will be spread across different zones
- * with best effort balancing.
- */
-export interface NodePlacementConfigurationOutput {
-  /**
-   * Allocation policy used by Batch Service to provision the nodes. If not
-   * specified, Batch will use the regional policy.
-   *
-   * Possible values: regional, zonal
-   */
-  policy?: string;
-}
-
-/** The configuration for virtual machine extensions. */
-export interface VMExtensionOutput {
-  /** The name of the virtual machine extension. */
-  name: string;
-  /** The name of the extension handler publisher. */
-  publisher: string;
-  /** The type of the extension. */
-  type: string;
-  /** The version of script handler. */
-  typeHandlerVersion?: string;
-  /**
-   * Indicates whether the extension should use a newer minor version if one is
-   * available at deployment time. Once deployed, however, the extension will not
-   * upgrade minor versions unless redeployed, even with this property set to true.
-   */
-  autoUpgradeMinorVersion?: boolean;
-  /** JSON formatted public settings for the extension. */
-  settings?: ObjectOutput;
-  /**
-   * The extension can contain either protectedSettings or
-   * protectedSettingsFromKeyVault or no protected settings at all.
-   */
-  protectedSettings?: ObjectOutput;
-  /**
-   * Collection of extension names after which this extension needs to be
-   * provisioned.
-   */
-  provisionAfterExtensions?: string[];
-}
-
-export interface ObjectOutput {}
-
-/** Settings for the operating system disk of the compute node (VM). */
-export interface OSDiskOutput {
-  /**
-   * Specifies the ephemeral Disk Settings for the operating system disk used by the
-   * compute node (VM).
-   */
-  ephemeralOSDiskSettings?: DiffDiskSettingsOutput;
-}
-
-/**
- * Specifies the ephemeral Disk Settings for the operating system disk used by the
- * compute node (VM).
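- *
- * Editor's sketch of the shape (not from the source): an OS disk placed on
- * the node's cache disk would appear as
- *
- *   const osDisk: OSDiskOutput = {
- *     ephemeralOSDiskSettings: { placement: "cachedisk" }
- *   };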
- */
-export interface DiffDiskSettingsOutput {
-  /**
-   * This property can be used by the user in the request to choose the location e.g.,
-   * cache disk space for Ephemeral OS disk provisioning. For more information on
-   * Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size
-   * requirements for Windows VMs at
-   * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements
-   * and Linux VMs at
-   * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements.
-   *
-   * Possible values: cachedisk
-   */
-  placement?: string;
-}
-
-/** An error that occurred when resizing a Pool. */
-export interface ResizeErrorOutput {
-  /**
-   * An identifier for the Pool resize error. Codes are invariant and are intended
-   * to be consumed programmatically.
-   */
-  code?: string;
-  /**
-   * A message describing the Pool resize error, intended to be suitable for display
-   * in a user interface.
-   */
-  message?: string;
-  /** A list of additional error details related to the Pool resize error. */
-  values?: Array;
-}
-
-/** Represents a name-value pair. */
-export interface NameValuePairOutput {
-  /** The name in the name-value pair. */
-  name?: string;
-  /** The value in the name-value pair. */
-  value?: string;
-}
-
-/** The results and errors from an execution of a Pool autoscale formula. */
-export interface AutoScaleRunOutput {
-  /** The time at which the autoscale formula was last evaluated. */
-  readonly timestamp: string;
-  /**
-   * Each variable value is returned in the form $variable=value, and variables are
-   * separated by semicolons.
-   */
-  results?: string;
-  /** An error that occurred when executing or evaluating a Pool autoscale formula. */
-  error?: AutoScaleRunErrorOutput;
-}
-
-/** An error that occurred when executing or evaluating a Pool autoscale formula. */
-export interface AutoScaleRunErrorOutput {
-  /**
-   * An identifier for the autoscale error. Codes are invariant and are intended to
-   * be consumed programmatically.
-   */
-  code?: string;
-  /**
-   * A message describing the autoscale error, intended to be suitable for display
-   * in a user interface.
-   */
-  message?: string;
-  /** A list of additional error details related to the autoscale error. */
-  values?: Array;
-}
-
-/** The network configuration for a Pool. */
-export interface NetworkConfigurationOutput {
-  /**
-   * The virtual network must be in the same region and subscription as the Azure
-   * Batch Account. The specified subnet should have enough free IP addresses to
-   * accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have
-   * enough free IP addresses, the Pool will partially allocate Nodes and a resize
-   * error will occur. The 'MicrosoftAzureBatch' service principal must have the
-   * 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for
-   * the specified VNet. The specified subnet must allow communication from the
-   * Azure Batch service to be able to schedule Tasks on the Nodes. This can be
-   * verified by checking if the specified VNet has any associated Network Security
-   * Groups (NSG). If communication to the Nodes in the specified subnet is denied
-   * by an NSG, then the Batch service will set the state of the Compute Nodes to
-   * unusable. For Pools created with virtualMachineConfiguration only ARM virtual
-   * networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools
-   * created with cloudServiceConfiguration both ARM and classic virtual networks
-   * are supported. If the specified VNet has any associated Network Security Groups
-   * (NSG), then a few reserved system ports must be enabled for inbound
-   * communication. For Pools created with a virtual machine configuration, enable
-   * ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows.
-   * For Pools created with a cloud service configuration, enable ports 10100,
-   * 20100, and 30100. Also enable outbound connections to Azure Storage on port
-   * 443. For more details see:
-   * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
-   */
-  subnetId?: string;
-  /**
-   * The scope of dynamic vnet assignment.
-   *
-   * Possible values: none, job
-   */
-  dynamicVNetAssignmentScope?: string;
-  /**
-   * Pool endpoint configuration is only supported on Pools with the
-   * virtualMachineConfiguration property.
-   */
-  endpointConfiguration?: PoolEndpointConfigurationOutput;
-  /**
-   * Public IP configuration property is only supported on Pools with the
-   * virtualMachineConfiguration property.
-   */
-  publicIPAddressConfiguration?: PublicIPAddressConfigurationOutput;
-}
-
-/** The endpoint configuration for a Pool. */
-export interface PoolEndpointConfigurationOutput {
-  /**
-   * The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum
-   * number of inbound NAT Pools is exceeded the request fails with HTTP status code
-   * 400. This cannot be specified if the IPAddressProvisioningType is
-   * NoPublicIPAddresses.
-   */
-  inboundNATPools: Array;
-}
-
-/**
- * An inbound NAT Pool that can be used to address specific ports on Compute Nodes
- * in a Batch Pool externally.
- */
-export interface InboundNATPoolOutput {
-  /**
-   * The name must be unique within a Batch Pool, can contain letters, numbers,
-   * underscores, periods, and hyphens. Names must start with a letter or number,
-   * must end with a letter, number, or underscore, and cannot exceed 77 characters.
-   * If any invalid values are provided the request fails with HTTP status code
-   * 400.
-   */
-  name: string;
-  /**
-   * The protocol of the endpoint.
-   *
-   * Possible values: tcp, udp
-   */
-  protocol: string;
-  /**
-   * This must be unique within a Batch Pool. Acceptable values are between 1 and
-   * 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any
-   * reserved values are provided the request fails with HTTP status code 400.
-   */
-  backendPort: number;
-  /**
-   * Acceptable values range between 1 and 65534 except ports from 50000 to 55000
-   * which are reserved. All ranges within a Pool must be distinct and cannot
-   * overlap. Each range must contain at least 40 ports. If any reserved or
-   * overlapping values are provided the request fails with HTTP status code 400.
-   */
-  frontendPortRangeStart: number;
-  /**
-   * Acceptable values range between 1 and 65534 except ports from 50000 to 55000
-   * which are reserved by the Batch service. All ranges within a Pool must be
-   * distinct and cannot overlap. Each range must contain at least 40 ports. If any
-   * reserved or overlapping values are provided the request fails with HTTP status
-   * code 400.
-   */
-  frontendPortRangeEnd: number;
-  /**
-   * The maximum number of rules that can be specified across all the endpoints on a
-   * Batch Pool is 25. If no network security group rules are specified, a default
-   * rule will be created to allow inbound access to the specified backendPort. If
-   * the maximum number of network security group rules is exceeded the request
-   * fails with HTTP status code 400.
-   */
-  networkSecurityGroupRules?: Array;
-}
-
-/** A network security group rule to apply to an inbound endpoint. */
-export interface NetworkSecurityGroupRuleOutput {
-  /**
-   * Priorities within a Pool must be unique and are evaluated in order of priority.
-   * The lower the number the higher the priority. For example, rules could be
-   * specified with order numbers of 150, 250, and 350. The rule with the order
-   * number of 150 takes precedence over the rule that has an order of 250. Allowed
-   * priorities are 150 to 4096. If any reserved or duplicate values are provided
-   * the request fails with HTTP status code 400.
-   */
-  priority: number;
-  /**
-   * The action that should be taken for a specified IP address, subnet range or tag.
-   *
-   * Possible values: allow, deny
-   */
-  access: string;
-  /**
-   * Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e.
-   * 192.168.1.0/24), default tag, or * (for all addresses). If any other values
-   * are provided the request fails with HTTP status code 400.
-   */
-  sourceAddressPrefix: string;
-  /**
-   * Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a
-   * port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each
-   * entry in this collection must not overlap any other entry (either a range or an
-   * individual port). If any other values are provided the request fails with HTTP
-   * status code 400. The default value is '*'.
-   */
-  sourcePortRanges?: string[];
-}
-
-/** The public IP Address configuration of the networking configuration of a Pool. */
-export interface PublicIPAddressConfigurationOutput {
-  /**
-   * The default value is BatchManaged.
-   *
-   * Possible values: batchmanaged, usermanaged, nopublicipaddresses
-   */
-  provision?: string;
-  /**
-   * The number of IPs specified here limits the maximum size of the Pool - 100
-   * dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public
-   * IP. For example, a pool needing 250 dedicated VMs would need at least 3 public
-   * IPs specified. Each element of this collection is of the form:
-   * /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.
-   */
-  ipAddressIds?: string[];
-}
-
-/**
- * Batch will retry Tasks when a recovery operation is triggered on a Node.
- * Examples of recovery operations include (but are not limited to) when an
- * unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
- * Retries due to recovery operations are independent of and are not counted
- * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
- * retry due to a recovery operation may occur. Because of this, all Tasks should
- * be idempotent. This means Tasks need to tolerate being interrupted and
- * restarted without causing any corruption or duplicate data. The best practice
- * for long running Tasks is to use some form of checkpointing. In some cases the
- * StartTask may be re-run even though the Compute Node was not rebooted. Special
- * care should be taken to avoid StartTasks which create breakaway processes or
- * install/launch services from the StartTask working directory, as this will
- * block Batch from being able to re-run the StartTask.
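- *
- * A minimal sketch (editor's illustration; the command line is hypothetical
- * and kept idempotent per the guidance above):
- *
- *   const startTask: StartTaskOutput = {
- *     commandLine: "/bin/sh -c 'echo node ready'",
- *     waitForSuccess: true,
- *     maxTaskRetryCount: 2
- *   };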
- */
-export interface StartTaskOutput {
- /**
- * The command line does not run under a shell, and therefore cannot take
- * advantage of shell features such as environment variable expansion. If you want
- * to take advantage of such features, you should invoke the shell in the command
- * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
- * MyCommand" in Linux. If the command line refers to file paths, it should use a
- * relative path (relative to the Task working directory), or use the Batch
- * provided environment variable
- * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
- */
- commandLine: string;
- /**
- * When this is specified, all directories recursively below the
- * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are
- * mapped into the container, all Task environment variables are mapped into the
- * container, and the Task command line is executed in the container. Files
- * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be
- * reflected to the host disk, meaning that Batch file APIs will not be able to
- * access those files.
- */
- containerSettings?: TaskContainerSettingsOutput;
- /** Files listed under this element are located in the Task's working directory. */
- resourceFiles?: Array<ResourceFileOutput>;
- /** A list of environment variable settings for the StartTask. */
- environmentSettings?: Array<EnvironmentSettingOutput>;
- /** If omitted, the Task runs as a non-administrative user unique to the Task. */
- userIdentity?: UserIdentityOutput;
- /**
- * The Batch service retries a Task if its exit code is nonzero. Note that this
- * value specifically controls the number of retries. The Batch service will try
- * the Task once, and may then retry up to this limit. For example, if the maximum
- * retry count is 3, Batch tries the Task up to 4 times (one initial try and 3
- * retries). If the maximum retry count is 0, the Batch service does not retry the
- * Task. If the maximum retry count is -1, the Batch service retries the Task
- * without limit, however this is not recommended for a start task or any task.
- * The default value is 0 (no retries)
- */
- maxTaskRetryCount?: number;
- /**
- * If true and the StartTask fails on a Node, the Batch service retries the
- * StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has
- * still not completed successfully after all retries, then the Batch service
- * marks the Node unusable, and will not schedule Tasks to it. This condition can
- * be detected via the Compute Node state and failure info details. If false, the
- * Batch service will not wait for the StartTask to complete. In this case, other
- * Tasks can start executing on the Compute Node while the StartTask is still
- * running; and even if the StartTask fails, new Tasks will continue to be
- * scheduled on the Compute Node. The default is true.
- */
- waitForSuccess?: boolean;
-}
-
-/** The container settings for a Task. */
-export interface TaskContainerSettingsOutput {
- /**
- * These additional options are supplied as arguments to the "docker create"
- * command, in addition to those controlled by the Batch Service.
- */
- containerRunOptions?: string;
- /**
- * This is the full Image reference, as would be specified to "docker pull". If
- * no tag is provided as part of the Image name, the tag ":latest" is used as a
- * default.
- */
- imageName: string;
- /** This setting can be omitted if it was already provided at Pool creation.
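A small illustrative StartTask shape based on the fields above; the command line invokes a shell explicitly, as the commandLine documentation recommends, and waitForSuccess blocks Task scheduling until the StartTask succeeds. Values are placeholders, not generated output.

```ts
const startTask = {
  // No implicit shell: wrap the command in "/bin/sh -c" (or "cmd /c" on Windows).
  commandLine: "/bin/sh -c 'apt-get update && apt-get install -y jq'",
  environmentSettings: [{ name: "STAGE", value: "test" }],
  maxTaskRetryCount: 1, // one retry => up to two attempts in total
  waitForSuccess: true, // the Node is marked unusable if all retries fail
};
```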
*/ - registry?: ContainerRegistryOutput; - /** - * The default is 'taskWorkingDirectory'. - * - * Possible values: taskWorkingDirectory, containerImageDefault - */ - workingDirectory?: string; -} - -/** A single file or multiple files to be downloaded to a Compute Node. */ -export interface ResourceFileOutput { - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. - */ - autoStorageContainerName?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. This URL must be readable - * and listable from compute nodes. There are three ways to get such a URL for a - * container in Azure storage: include a Shared Access Signature (SAS) granting - * read and list permissions on the container, use a managed identity with read - * and list permissions, or set the ACL for the container to allow public access. - */ - storageContainerUrl?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. If the URL points to - * Azure Blob Storage, it must be readable from compute nodes. There are three - * ways to get such a URL for a blob in Azure storage: include a Shared Access - * Signature (SAS) granting read permissions on the blob, use a managed identity - * with read permission, or set the ACL for the blob or its container to allow - * public access. - */ - httpUrl?: string; - /** - * The property is valid only when autoStorageContainerName or storageContainerUrl - * is used. This prefix can be a partial filename or a subdirectory. If a prefix - * is not specified, all the files in the container will be downloaded. - */ - blobPrefix?: string; - /** - * If the httpUrl property is specified, the filePath is required and describes - * the path which the file will be downloaded to, including the filename. - * Otherwise, if the autoStorageContainerName or storageContainerUrl property is - * specified, filePath is optional and is the directory to download the files to. - * In the case where filePath is used as a directory, any directory structure - * already associated with the input data will be retained in full and appended to - * the specified filePath directory. The specified relative path cannot break out - * of the Task's working directory (for example by using '..'). - */ - filePath?: string; - /** - * This property applies only to files being downloaded to Linux Compute Nodes. It - * will be ignored if it is specified for a resourceFile which will be downloaded - * to a Windows Compute Node. If this property is not specified for a Linux - * Compute Node, then a default value of 0770 is applied to the file. - */ - fileMode?: string; - /** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ - identityReference?: ComputeNodeIdentityReferenceOutput; -} - -/** An environment variable to be set on a Task process. */ -export interface EnvironmentSettingOutput { - /** The name of the environment variable. */ - name: string; - /** The value of the environment variable. */ - value?: string; -} - -/** Specify either the userName or autoUser property, but not both. */ -export interface UserIdentityOutput { - /** - * The userName and autoUser properties are mutually exclusive; you must specify - * one but not both. 
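Since autoStorageContainerName, storageContainerUrl and httpUrl are mutually exclusive, each ResourceFile sets exactly one of them. A hypothetical sketch with placeholder names and URLs:

```ts
const resourceFiles = [
  {
    // Single blob via HTTP URL: filePath is required and names the target file.
    httpUrl: "https://example.blob.core.windows.net/inputs/data.csv?<sas>",
    filePath: "inputs/data.csv",
    fileMode: "0644", // Linux Compute Nodes only; ignored on Windows
  },
  {
    // Whole container via auto-storage: filePath is an optional target directory.
    autoStorageContainerName: "shared-assets",
    blobPrefix: "models/",
    filePath: "assets",
  },
];
```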
- */
- username?: string;
- /**
- * The userName and autoUser properties are mutually exclusive; you must specify
- * one but not both.
- */
- autoUser?: AutoUserSpecificationOutput;
-}
-
-/**
- * Specifies the parameters for the auto user that runs a Task on the Batch
- * service.
- */
-export interface AutoUserSpecificationOutput {
- /**
- * The default value is pool. If the pool is running Windows a value of Task
- * should be specified if stricter isolation between tasks is required. For
- * example, if the task mutates the registry in a way which could impact other
- * tasks, or if certificates have been specified on the pool which should not be
- * accessible by normal tasks but should be accessible by StartTasks.
- *
- * Possible values: task, pool
- */
- scope?: string;
- /**
- * The default value is nonAdmin.
- *
- * Possible values: nonadmin, admin
- */
- elevationLevel?: string;
-}
-
-/** A reference to a Certificate to be installed on Compute Nodes in a Pool. */
-export interface CertificateReferenceOutput {
- /** The thumbprint of the Certificate. */
- thumbprint: string;
- /** The algorithm with which the thumbprint is associated. This must be sha1. */
- thumbprintAlgorithm: string;
- /**
- * The default value is currentuser. This property is applicable only for Pools
- * configured with Windows Compute Nodes (that is, created with
- * cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows
- * Image reference). For Linux Compute Nodes, the Certificates are stored in a
- * directory inside the Task working directory and an environment variable
- * AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location.
- * For Certificates with visibility of 'remoteUser', a 'certs' directory is
- * created in the user's home directory (e.g., /home/{user-name}/certs) and
- * Certificates are placed in that directory.
- *
- * Possible values: currentuser, localmachine
- */
- storeLocation?: string;
- /**
- * This property is applicable only for Pools configured with Windows Compute
- * Nodes (that is, created with cloudServiceConfiguration, or with
- * virtualMachineConfiguration using a Windows Image reference). Common store
- * names include: My, Root, CA, Trust, Disallowed, TrustedPeople,
- * TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be
- * used. The default value is My.
- */
- storeName?: string;
- /**
- * You can specify more than one visibility in this collection. The default is all
- * Accounts.
- */
- visibility?: string[];
-}
-
-/** A reference to a Package to be deployed to Compute Nodes. */
-export interface ApplicationPackageReferenceOutput {
- /**
- * When creating a pool, the package's application ID must be fully qualified
- * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}).
- */
- applicationId: string;
- /**
- * If this is omitted on a Pool, and no default version is specified for this
- * application, the request fails with the error code
- * InvalidApplicationPackageReferences and HTTP status code 409. If this is
- * omitted on a Task, and no default version is specified for this application,
- * the Task fails with a pre-processing error.
- */
- version?: string;
-}
-
-/** Specifies how Tasks should be distributed across Compute Nodes. */
-export interface TaskSchedulingPolicyOutput {
- /**
- * If not specified, the default is spread.
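userName and autoUser are mutually exclusive, so a user identity sets exactly one of them. A minimal sketch of the auto-user variant, using the possible values listed above:

```ts
const userIdentity = {
  autoUser: {
    scope: "task",           // "task" | "pool"; the default is pool
    elevationLevel: "admin", // "nonadmin" | "admin"; the default is nonAdmin
  },
};
```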
- * - * Possible values: spread, pack - */ - nodeFillType: string; -} - -/** - * Properties used to create a user used to execute Tasks on an Azure Batch - * Compute Node. - */ -export interface UserAccountOutput { - /** - * The name of the user Account. Names can contain any Unicode characters up to a - * maximum length of 20. - */ - name: string; - /** The password for the user Account. */ - password: string; - /** - * The default value is nonAdmin. - * - * Possible values: nonadmin, admin - */ - elevationLevel?: string; - /** - * This property is ignored if specified on a Windows Pool. If not specified, the - * user is created with the default options. - */ - linuxUserConfiguration?: LinuxUserConfigurationOutput; - /** - * This property can only be specified if the user is on a Windows Pool. If not - * specified and on a Windows Pool, the user is created with the default options. - */ - windowsUserConfiguration?: WindowsUserConfigurationOutput; -} - -/** Properties used to create a user Account on a Linux Compute Node. */ -export interface LinuxUserConfigurationOutput { - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the uid. - */ - uid?: number; - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the gid. - */ - gid?: number; - /** - * The private key must not be password protected. The private key is used to - * automatically configure asymmetric-key based authentication for SSH between - * Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication - * property is true (it is ignored if enableInterNodeCommunication is false). It - * does this by placing the key pair into the user's .ssh directory. If not - * specified, password-less SSH is not configured between Compute Nodes (no - * modification of the user's .ssh directory is done). - */ - sshPrivateKey?: string; -} - -/** Properties used to create a user Account on a Windows Compute Node. */ -export interface WindowsUserConfigurationOutput { - /** - * The default value for VirtualMachineConfiguration Pools is 'batch' and for - * CloudServiceConfiguration Pools is 'interactive'. - * - * Possible values: batch, interactive - */ - loginMode?: string; -} - -/** - * The Batch service does not assign any meaning to this metadata; it is solely - * for the use of user code. - */ -export interface MetadataItemOutput { - /** The name of the metadata item. */ - name: string; - /** The value of the metadata item. */ - value: string; -} - -/** The file system to mount on each node. */ -export interface MountConfigurationOutput { - /** This property is mutually exclusive with all other properties. */ - azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfigurationOutput; - /** This property is mutually exclusive with all other properties. */ - nfsMountConfiguration?: NFSMountConfigurationOutput; - /** This property is mutually exclusive with all other properties. */ - cifsMountConfiguration?: CifsMountConfigurationOutput; - /** This property is mutually exclusive with all other properties. */ - azureFileShareConfiguration?: AzureFileShareConfigurationOutput; -} - -/** Information used to connect to an Azure Storage Container using Blobfuse. */ -export interface AzureBlobFileSystemConfigurationOutput { - /** The Azure Storage Account name. */ - accountName: string; - /** The Azure Blob Storage Container name. 
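An illustrative user Account for a Linux Pool; uid and gid must be specified together or not at all, so the sketch sets both. The name and password are placeholders:

```ts
const userAccount = {
  name: "batchworker",       // up to 20 Unicode characters
  password: "<placeholder>",
  elevationLevel: "nonadmin",
  linuxUserConfiguration: {
    uid: 1100,
    gid: 1100,               // uid and gid go together or not at all
  },
};
```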
*/ - containerName: string; - /** - * This property is mutually exclusive with both sasKey and identity; exactly one - * must be specified. - */ - accountKey?: string; - /** - * This property is mutually exclusive with both accountKey and identity; exactly - * one must be specified. - */ - sasKey?: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - blobfuseOptions?: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** - * This property is mutually exclusive with both accountKey and sasKey; exactly - * one must be specified. - */ - identityReference?: ComputeNodeIdentityReferenceOutput; -} - -/** Information used to connect to an NFS file system. */ -export interface NFSMountConfigurationOutput { - /** The URI of the file system to mount. */ - source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; -} - -/** Information used to connect to a CIFS file system. */ -export interface CifsMountConfigurationOutput { - /** The user to use for authentication against the CIFS file system. */ - username: string; - /** The URI of the file system to mount. */ - source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; - /** The password to use for authentication against the CIFS file system. */ - password: string; -} - -/** Information used to connect to an Azure Fileshare. */ -export interface AzureFileShareConfigurationOutput { - /** The Azure Storage account name. */ - accountName: string; - /** This is of the form 'https://{account}.file.core.windows.net/'. */ - azureFileUrl: string; - /** The Azure Storage account key. */ - accountKey: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ - relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; -} - -/** The identity of the Batch pool, if configured. */ -export interface BatchPoolIdentityOutput { - /** - * The list of user identities associated with the Batch pool. The user identity - * dictionary key references will be ARM resource ids in the form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - * - * Possible values: UserAssigned, None - */ - type: string; - /** - * The user identity dictionary key references will be ARM resource ids in the - * form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - */ - userAssignedIdentities?: Array; -} - -/** The user assigned Identity */ -export interface UserAssignedIdentityOutput { - /** The ARM resource id of the user assigned identity */ - resourceId: string; - /** The client id of the user assigned identity. 
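Each MountConfiguration entry sets exactly one of its mutually exclusive properties, and every relativeMountPath resolves under AZ_BATCH_NODE_MOUNTS_DIR. A sketch mounting one NFS share and one blob container via blobfuse; sources and keys are placeholders:

```ts
const mountConfiguration = [
  {
    nfsMountConfiguration: {
      source: "nfs.example.com:/exports/data",
      relativeMountPath: "data", // resolved under AZ_BATCH_NODE_MOUNTS_DIR
      mountOptions: "ro",
    },
  },
  {
    azureBlobFileSystemConfiguration: {
      accountName: "mystorageacct",
      containerName: "inputs",
      sasKey: "<sas-token>", // exactly one of accountKey, sasKey or identityReference
      relativeMountPath: "inputs",
    },
  },
];
```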
*/ - readonly clientId?: string; - /** The principal id of the user assigned identity. */ - readonly principalId?: string; -} - -/** The result of listing the Pools in an Account. */ -export interface BatchPoolListResultOutput { - /** The list of Pools. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** The result of listing the supported Virtual Machine Images. */ -export interface AccountListSupportedImagesResultOutput { - /** The list of supported Virtual Machine Images. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** - * A reference to the Azure Virtual Machines Marketplace Image and additional - * information about the Image. - */ -export interface ImageInformationOutput { - /** The ID of the Compute Node agent SKU which the Image supports. */ - readonly nodeAgentSKUId: string; - /** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ - imageReference: ImageReferenceOutput; - /** - * The type of operating system (e.g. Windows or Linux) of the Image. - * - * Possible values: linux, windows - */ - osType: string; - /** - * Not every capability of the Image is listed. Capabilities in this list are - * considered of special interest and are generally related to integration with - * other features in the Azure Batch service. - */ - capabilities?: string[]; - /** - * The time when the Azure Batch service will stop accepting create Pool requests - * for the Image. - */ - batchSupportEndOfLife?: string; - /** - * Whether the Azure Batch service actively verifies that the Image is compatible - * with the associated Compute Node agent SKU. - * - * Possible values: verified, unverified - */ - verificationType: string; -} - -/** The result of listing the Compute Node counts in the Account. */ -export interface PoolNodeCountsListResultOutput { - /** A list of Compute Node counts by Pool. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** The number of Compute Nodes in each state for a Pool. */ -export interface PoolNodeCountsOutput { - /** The ID of the Pool. */ - readonly poolId: string; - /** The number of Compute Nodes in each Compute Node state. */ - dedicated?: NodeCountsOutput; - /** The number of Compute Nodes in each Compute Node state. */ - lowPriority?: NodeCountsOutput; -} - -/** The number of Compute Nodes in each Compute Node state. */ -export interface NodeCountsOutput { - /** The number of Compute Nodes in the creating state. */ - creating: number; - /** The number of Compute Nodes in the idle state. */ - idle: number; - /** The number of Compute Nodes in the offline state. */ - offline: number; - /** The number of Compute Nodes in the preempted state. */ - preempted: number; - /** The count of Compute Nodes in the rebooting state. */ - rebooting: number; - /** The number of Compute Nodes in the reimaging state. */ - reimaging: number; - /** The number of Compute Nodes in the running state. */ - running: number; - /** The number of Compute Nodes in the starting state. */ - starting: number; - /** The number of Compute Nodes in the startTaskFailed state. */ - startTaskFailed: number; - /** The number of Compute Nodes in the leavingPool state. */ - leavingPool: number; - /** The number of Compute Nodes in the unknown state. 
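The list results above share the same paging contract: read value, then follow "odata.nextLink" until it is absent. A self-contained sketch; fetchPage is a hypothetical transport callback, not part of the generated client:

```ts
interface BatchPoolListResultOutput {
  value?: Array<unknown>;
  "odata.nextLink"?: string;
}

async function listAllPools(
  fetchPage: (nextLink?: string) => Promise<BatchPoolListResultOutput>
): Promise<unknown[]> {
  const pools: unknown[] = [];
  let nextLink: string | undefined;
  do {
    const page = await fetchPage(nextLink); // first call: no link, i.e. first page
    pools.push(...(page.value ?? []));
    nextLink = page["odata.nextLink"];
  } while (nextLink);
  return pools;
}
```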
*/
- unknown: number;
- /** The number of Compute Nodes in the unusable state. */
- unusable: number;
- /** The number of Compute Nodes in the waitingForStartTask state. */
- waitingForStartTask: number;
- /** The total number of Compute Nodes. */
- total: number;
-}
-
-/** Resource usage statistics for a Job. */
-export interface JobStatisticsOutput {
- /** The URL of the statistics. */
- readonly url: string;
- /** The start time of the time range covered by the statistics. */
- startTime: string;
- /**
- * The time at which the statistics were last updated. All statistics are limited
- * to the range between startTime and lastUpdateTime.
- */
- lastUpdateTime: string;
- /**
- * The total user mode CPU time (summed across all cores and all Compute Nodes)
- * consumed by all Tasks in the Job.
- */
- userCPUTime: string;
- /**
- * The total kernel mode CPU time (summed across all cores and all Compute Nodes)
- * consumed by all Tasks in the Job.
- */
- kernelCPUTime: string;
- /**
- * The wall clock time is the elapsed time from when the Task started running on
- * a Compute Node to when it finished (or to the last time the statistics were
- * updated, if the Task had not finished by then). If a Task was retried, this
- * includes the wall clock time of all the Task retries.
- */
- wallClockTime: string;
- /** The total number of disk read operations made by all Tasks in the Job. */
- readIOps: number;
- /** The total number of disk write operations made by all Tasks in the Job. */
- writeIOps: number;
- /** The total amount of data in GiB read from disk by all Tasks in the Job. */
- readIOGiB: number;
- /** The total amount of data in GiB written to disk by all Tasks in the Job. */
- writeIOGiB: number;
- /** A Task completes successfully if it returns exit code 0. */
- numSucceededTasks: number;
- /**
- * A Task fails if it exhausts its maximum retry count without returning exit code
- * 0.
- */
- numFailedTasks: number;
- /**
- * The total number of retries on all the Tasks in the Job during the given time
- * range.
- */
- numTaskRetries: number;
- /**
- * The wait time for a Task is defined as the elapsed time between the creation of
- * the Task and the start of Task execution. (If the Task is retried due to
- * failures, the wait time is the time to the most recent Task execution.) This
- * value is only reported in the Account lifetime statistics; it is not included
- * in the Job statistics.
- */
- waitTime: string;
-}
-
-/** An Azure Batch Job. */
-export interface BatchJobOutput {
- /**
- * The ID is case-preserving and case-insensitive (that is, you may not have two
- * IDs within an Account that differ only by case).
- */
- id?: string;
- /** The display name for the Job. */
- displayName?: string;
- /**
- * Whether Tasks in the Job can define dependencies on each other. The default is
- * false.
- */
- usesTaskDependencies?: boolean;
- /** The URL of the Job. */
- readonly url?: string;
- /**
- * This is an opaque string. You can use it to detect whether the Job has changed
- * between requests. In particular, you can pass the ETag when updating a Job
- * to specify that your changes should take effect only if nobody else has
- * modified the Job in the meantime.
- */
- readonly eTag?: string;
- /**
- * This is the last time at which the Job level data, such as the Job state or
- * priority, changed. It does not factor in task-level changes such as adding new
- * Tasks or Tasks changing state.
- */
- readonly lastModified?: string;
- /** The creation time of the Job.
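The eTag field described above enables optimistic concurrency: send the value back as If-Match so an update only succeeds if the Job is unchanged. A hedged sketch; getJob and updateJob are hypothetical stand-ins declared only for type-checking:

```ts
declare function getJob(jobId: string): Promise<{ eTag?: string }>;
declare function updateJob(
  jobId: string,
  body: { priority?: number },
  headers: { "If-Match"?: string }
): Promise<void>;

async function bumpPriority(jobId: string): Promise<void> {
  const job = await getJob(jobId);
  // The update is rejected if someone modified the Job since we read it.
  await updateJob(jobId, { priority: 100 }, { "If-Match": job.eTag });
}
```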
*/
- readonly creationTime?: string;
- /**
- * The state of the Job.
- *
- * Possible values: active, disabling, disabled, enabling, terminating, completed, deleting
- */
- readonly state?: string;
- /** The time at which the Job entered its current state. */
- readonly stateTransitionTime?: string;
- /**
- * This property is not set if the Job is in its initial Active state.
- *
- * Possible values: active, disabling, disabled, enabling, terminating, completed, deleting
- */
- readonly previousState?: string;
- /** This property is not set if the Job is in its initial Active state. */
- readonly previousStateTransitionTime?: string;
- /**
- * Priority values can range from -1000 to 1000, with -1000 being the lowest
- * priority and 1000 being the highest priority. The default value is 0.
- */
- priority?: number;
- /**
- * If the value is set to True, other high priority jobs submitted to the system
- * will take precedence and will be able to requeue tasks from this job. You can
- * update a job's allowTaskPreemption after it has been created using the update
- * job API.
- */
- allowTaskPreemption?: boolean;
- /**
- * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not
- * specified, the default value is -1, which means there's no limit to the number
- * of tasks that can be run at once. You can update a job's maxParallelTasks after
- * it has been created using the update job API.
- */
- maxParallelTasks?: number;
- /** The execution constraints for a Job. */
- constraints?: JobConstraintsOutput;
- /**
- * The Job Manager Task is automatically started when the Job is created. The
- * Batch service tries to schedule the Job Manager Task before any other Tasks in
- * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where
- * Job Manager Tasks are running for as long as possible (that is, Compute Nodes
- * running 'normal' Tasks are removed before Compute Nodes running Job Manager
- * Tasks). When a Job Manager Task fails and needs to be restarted, the system
- * tries to schedule it at the highest priority. If there are no idle Compute
- * Nodes available, the system may terminate one of the running Tasks in the Pool
- * and return it to the queue in order to make room for the Job Manager Task to
- * restart. Note that a Job Manager Task in one Job does not have priority over
- * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For
- * example, if a Job Manager in a priority 0 Job needs to be restarted, it will
- * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery
- * operation is triggered on a Node. Examples of recovery operations include (but
- * are not limited to) when an unhealthy Node is rebooted or a Compute Node
- * disappeared due to host failure. Retries due to recovery operations are
- * independent of and are not counted against the maxTaskRetryCount. Even if the
- * maxTaskRetryCount is 0, an internal retry due to a recovery operation may
- * occur. Because of this, all Tasks should be idempotent. This means Tasks need
- * to tolerate being interrupted and restarted without causing any corruption or
- * duplicate data. The best practice for long running Tasks is to use some form of
- * checkpointing.
- */
- jobManagerTask?: JobManagerTaskOutput;
- /**
- * The Job Preparation Task is a special Task run on each Compute Node before any
- * other Task of the Job.
- */
- jobPreparationTask?: JobPreparationTaskOutput;
- /**
- * The Job Release Task is a special Task run at the end of the Job on each
- * Compute Node that has run any other Task of the Job.
- */
- jobReleaseTask?: JobReleaseTaskOutput;
- /**
- * Individual Tasks can override an environment setting specified here by
- * specifying the same setting name with a different value.
- */
- commonEnvironmentSettings?: Array<EnvironmentSettingOutput>;
- /** Specifies how a Job should be assigned to a Pool. */
- poolInfo?: PoolInformationOutput;
- /**
- * The default is noaction.
- *
- * Possible values: noaction, terminatejob
- */
- onAllTasksComplete?: string;
- /**
- * A Task is considered to have failed if it has a failureInfo. A failureInfo is set
- * if the Task completes with a non-zero exit code after exhausting its retry
- * count, or if there was an error starting the Task, for example due to a
- * resource file download error. The default is noaction.
- *
- * Possible values: noaction, performexitoptionsjobaction
- */
- onTaskFailure?: string;
- /** The network configuration for the Job. */
- networkConfiguration?: JobNetworkConfigurationOutput;
- /**
- * The Batch service does not assign any meaning to metadata; it is solely for the
- * use of user code.
- */
- metadata?: Array<MetadataItemOutput>;
- /** Contains information about the execution of a Job in the Azure Batch service. */
- readonly executionInfo?: JobExecutionInformationOutput;
- /**
- * This property is populated only if the CloudJob was retrieved with an expand
- * clause including the 'stats' attribute; otherwise it is null. The statistics
- * may not be immediately available. The Batch service performs periodic roll-up
- * of statistics. The typical delay is about 30 minutes.
- */
- readonly stats?: JobStatisticsOutput;
-}
-
-/** The execution constraints for a Job. */
-export interface JobConstraintsOutput {
- /**
- * If the Job does not complete within the time limit, the Batch service
- * terminates it and any Tasks that are still running. In this case, the
- * termination reason will be MaxWallClockTimeExpiry. If this property is not
- * specified, there is no time limit on how long the Job may run.
- */
- maxWallClockTime?: string;
- /**
- * Note that this value specifically controls the number of retries. The Batch
- * service will try each Task once, and may then retry up to this limit. For
- * example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one
- * initial try and 3 retries). If the maximum retry count is 0, the Batch service
- * does not retry Tasks. If the maximum retry count is -1, the Batch service
- * retries the Task without limit, however this is not recommended for a start
- * task or any task. The default value is 0 (no retries)
- */
- maxTaskRetryCount?: number;
-}
-
-/**
- * The Job Manager Task is automatically started when the Job is created. The
- * Batch service tries to schedule the Job Manager Task before any other Tasks in
- * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where
- * Job Manager Tasks are running for as long as possible (that is, Compute Nodes
- * running 'normal' Tasks are removed before Compute Nodes running Job Manager
- * Tasks). When a Job Manager Task fails and needs to be restarted, the system
- * tries to schedule it at the highest priority. If there are no idle Compute
- * Nodes available, the system may terminate one of the running Tasks in the Pool
- * and return it to the queue in order to make room for the Job Manager Task to
- * restart.
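A short sketch of Job constraints matching the retry semantics documented above: a maxTaskRetryCount of 3 means one initial try plus up to three retries (four attempts in total). The duration is assumed to be an ISO 8601 string, consistent with the string-typed time fields in these models:

```ts
const constraints = {
  maxWallClockTime: "PT4H", // Job terminated with reason MaxWallClockTimeExpiry after 4 hours
  maxTaskRetryCount: 3,     // up to 4 attempts per Task
};
```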
Note that a Job Manager Task in one Job does not have priority over - * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For - * example, if a Job Manager in a priority 0 Job needs to be restarted, it will - * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery - * operation is triggered on a Node. Examples of recovery operations include (but - * are not limited to) when an unhealthy Node is rebooted or a Compute Node - * disappeared due to host failure. Retries due to recovery operations are - * independent of and are not counted against the maxTaskRetryCount. Even if the - * maxTaskRetryCount is 0, an internal retry due to a recovery operation may - * occur. Because of this, all Tasks should be idempotent. This means Tasks need - * to tolerate being interrupted and restarted without causing any corruption or - * duplicate data. The best practice for long running Tasks is to use some form of - * checkpointing. - */ -export interface JobManagerTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. - */ - id: string; - /** - * It need not be unique and can contain any Unicode characters up to a maximum - * length of 1024. - */ - displayName?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine: string; - /** - * If the Pool that will run this Task has containerConfiguration set, this must - * be set as well. If the Pool that will run this Task doesn't have - * containerConfiguration set, this must not be set. When this is specified, all - * directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure - * Batch directories on the node) are mapped into the container, all Task - * environment variables are mapped into the container, and the Task command line - * is executed in the container. Files produced in the container outside of - * AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that - * Batch file APIs will not be able to access those files. - */ - containerSettings?: TaskContainerSettingsOutput; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ - resourceFiles?: Array; - /** - * For multi-instance Tasks, the files will only be uploaded from the Compute Node - * on which the primary Task is executed. - */ - outputFiles?: Array; - /** A list of environment variable settings for the Job Manager Task. */ - environmentSettings?: Array; - /** Execution constraints to apply to a Task. 
*/
- constraints?: TaskConstraintsOutput;
- /**
- * The default is 1. A Task can only be scheduled to run on a compute node if the
- * node has enough free scheduling slots available. For multi-instance Tasks, this
- * property is not supported and must not be specified.
- */
- requiredSlots?: number;
- /**
- * If true, when the Job Manager Task completes, the Batch service marks the Job
- * as complete. If any Tasks are still running at this time (other than Job
- * Release), those Tasks are terminated. If false, the completion of the Job
- * Manager Task does not affect the Job status. In this case, you should either
- * use the onAllTasksComplete attribute to terminate the Job, or have a client or
- * user terminate the Job explicitly. An example of this is if the Job Manager
- * creates a set of Tasks but then takes no further role in their execution. The
- * default value is true. If you are using the onAllTasksComplete and
- * onTaskFailure attributes to control Job lifetime, and using the Job Manager
- * Task only to create the Tasks for the Job (not to monitor progress), then it is
- * important to set killJobOnCompletion to false.
- */
- killJobOnCompletion?: boolean;
- /** If omitted, the Task runs as a non-administrative user unique to the Task. */
- userIdentity?: UserIdentityOutput;
- /**
- * If true, no other Tasks will run on the same Node for as long as the Job
- * Manager is running. If false, other Tasks can run simultaneously with the Job
- * Manager on a Compute Node. The Job Manager Task counts normally against the
- * Compute Node's concurrent Task limit, so this is only relevant if the Compute
- * Node allows multiple concurrent Tasks. The default value is true.
- */
- runExclusive?: boolean;
- /**
- * Application Packages are downloaded and deployed to a shared directory, not the
- * Task working directory. Therefore, if a referenced Application Package is
- * already on the Compute Node, and is up to date, then it is not re-downloaded;
- * the existing copy on the Compute Node is used. If a referenced Application
- * Package cannot be installed, for example because the package has been deleted
- * or because download failed, the Task fails.
- */
- applicationPackageReferences?: Array<ApplicationPackageReferenceOutput>;
- /**
- * If this property is set, the Batch service provides the Task with an
- * authentication token which can be used to authenticate Batch service operations
- * without requiring an Account access key. The token is provided via the
- * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the
- * Task can carry out using the token depend on the settings. For example, a Task
- * can request Job permissions in order to add other Tasks to the Job, or check
- * the status of the Job or of other Tasks under the Job.
- */
- authenticationTokenSettings?: AuthenticationTokenSettingsOutput;
- /** The default value is true. */
- allowLowPriorityNode?: boolean;
-}
-
-/**
- * On every file upload, the Batch service writes two log files to the compute node,
- * 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn
- * more about a specific failure.
- */
-export interface OutputFileOutput {
- /**
- * Both relative and absolute paths are supported. Relative paths are relative to
- * the Task working directory. The following wildcards are supported: * matches 0
- * or more characters (for example pattern abc* would match abc or abcdef), **
- * matches any directory, ?
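An illustrative Job Manager Task tying together the fields above: killJobOnCompletion is false because the manager only submits Tasks and Job lifetime is driven by onAllTasksComplete instead, exactly the situation the documentation calls out. Values are placeholders:

```ts
const jobManagerTask = {
  id: "jobmanager", // alphanumerics, hyphens and underscores; max 64 chars
  commandLine: "/bin/sh -c 'node submit-tasks.js'",
  killJobOnCompletion: false, // let onAllTasksComplete terminate the Job
  runExclusive: false,        // allow other Tasks next to the manager
  authenticationTokenSettings: { access: ["job"] }, // "job" is the only supported value
};
```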
matches any single character, [abc] matches one - * character in the brackets, and [a-c] matches one character in the range. - * Brackets can include a negation to match any character not specified (for - * example [!abc] matches any character but a, b, or c). If a file name starts - * with "." it is ignored by default but may be matched by specifying it - * explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple - * example: **\*.txt matches any file that does not start in '.' and ends with - * .txt in the Task working directory or any subdirectory. If the filename - * contains a wildcard character it can be escaped using brackets (for example - * abc[*] would match a file named abc*). Note that both \ and / are treated as - * directory separators on Windows, but only / is on Linux. Environment variables - * (%var% on Windows or $var on Linux) are expanded prior to the pattern being - * applied. - */ - filePattern: string; - /** The destination to which a file should be uploaded. */ - destination: OutputFileDestinationOutput; - /** - * Details about an output file upload operation, including under what conditions - * to perform the upload. - */ - uploadOptions: OutputFileUploadOptionsOutput; -} - -/** The destination to which a file should be uploaded. */ -export interface OutputFileDestinationOutput { - /** Specifies a file upload destination within an Azure blob storage container. */ - container?: OutputFileBlobContainerDestinationOutput; -} - -/** Specifies a file upload destination within an Azure blob storage container. */ -export interface OutputFileBlobContainerDestinationOutput { - /** - * If filePattern refers to a specific file (i.e. contains no wildcards), then - * path is the name of the blob to which to upload that file. If filePattern - * contains one or more wildcards (and therefore may match multiple files), then - * path is the name of the blob virtual directory (which is prepended to each blob - * name) to which to upload the file(s). If omitted, file(s) are uploaded to the - * root of the container with a blob name matching their file name. - */ - path?: string; - /** - * If not using a managed identity, the URL must include a Shared Access Signature - * (SAS) granting write permissions to the container. - */ - containerUrl: string; - /** The identity must have write access to the Azure Blob Storage container */ - identityReference?: ComputeNodeIdentityReferenceOutput; - /** - * These headers will be specified when uploading files to Azure Storage. Official - * document on allowed headers when uploading blobs: - * https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types - */ - uploadHeaders?: Array; -} - -/** An HTTP header name-value pair */ -export interface HttpHeaderOutput { - /** The case-insensitive name of the header to be used while uploading output files */ - name: string; - /** The value of the header to be used while uploading output files */ - value?: string; -} - -/** - * Details about an output file upload operation, including under what conditions - * to perform the upload. - */ -export interface OutputFileUploadOptionsOutput { - /** - * The default is taskcompletion. - * - * Possible values: tasksuccess, taskfailure, taskcompletion - */ - uploadCondition: string; -} - -/** Execution constraints to apply to a Task. */ -export interface TaskConstraintsOutput { - /** If this is not specified, there is no time limit on how long the Task may run. 
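A sketch of an output file rule using the wildcard syntax described above: every .txt file under the Task working directory is uploaded to a blob virtual directory when the Task completes. The container URL is a placeholder SAS URL:

```ts
const outputFile = {
  filePattern: "**/*.txt", // any .txt not starting with "." in the working dir or below
  destination: {
    container: {
      containerUrl: "https://example.blob.core.windows.net/logs?<sas>", // needs write access
      path: "job-1/task-output", // blob virtual directory prefix for multi-file patterns
    },
  },
  uploadOptions: {
    uploadCondition: "taskcompletion", // tasksuccess | taskfailure | taskcompletion
  },
};
```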
*/ - maxWallClockTime?: string; - /** - * The default is 7 days, i.e. the Task directory will be retained for 7 days - * unless the Compute Node is removed or the Job is deleted. - */ - retentionTime?: string; - /** - * Note that this value specifically controls the number of retries for the Task - * executable due to a nonzero exit code. The Batch service will try the Task - * once, and may then retry up to this limit. For example, if the maximum retry - * count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). - * If the maximum retry count is 0, the Batch service does not retry the Task - * after the first attempt. If the maximum retry count is -1, the Batch service - * retries the Task without limit, however this is not recommended for a start - * task or any task. The default value is 0 (no retries) - */ - maxTaskRetryCount?: number; -} - -/** - * The settings for an authentication token that the Task can use to perform Batch - * service operations. - */ -export interface AuthenticationTokenSettingsOutput { - /** - * The authentication token grants access to a limited set of Batch service - * operations. Currently the only supported value for the access property is - * 'job', which grants access to all operations related to the Job which contains - * the Task. - */ - access?: string[]; -} - -/** - * You can use Job Preparation to prepare a Node to run Tasks for the Job. - * Activities commonly performed in Job Preparation include: Downloading common - * resource files used by all the Tasks in the Job. The Job Preparation Task can - * download these common resource files to the shared location on the Node. - * (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the Node so - * that all Tasks of that Job can communicate with it. If the Job Preparation Task - * fails (that is, exhausts its retry count before exiting with exit code 0), - * Batch will not run Tasks of this Job on the Node. The Compute Node remains - * ineligible to run Tasks of this Job until it is reimaged. The Compute Node - * remains active and can be used for other Jobs. The Job Preparation Task can run - * multiple times on the same Node. Therefore, you should write the Job - * Preparation Task to handle re-execution. If the Node is rebooted, the Job - * Preparation Task is run again on the Compute Node before scheduling any other - * Task of the Job, if rerunOnNodeRebootAfterSuccess is true or if the Job - * Preparation Task did not previously complete. If the Node is reimaged, the Job - * Preparation Task is run again before scheduling any Task of the Job. Batch will - * retry Tasks when a recovery operation is triggered on a Node. Examples of - * recovery operations include (but are not limited to) when an unhealthy Node is - * rebooted or a Compute Node disappeared due to host failure. Retries due to - * recovery operations are independent of and are not counted against the - * maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to - * a recovery operation may occur. Because of this, all Tasks should be - * idempotent. This means Tasks need to tolerate being interrupted and restarted - * without causing any corruption or duplicate data. The best practice for long - * running Tasks is to use some form of checkpointing. - */ -export interface JobPreparationTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. 
If you do not - * specify this property, the Batch service assigns a default value of - * 'jobpreparation'. No other Task in the Job can have the same ID as the Job - * Preparation Task. If you try to submit a Task with the same id, the Batch - * service rejects the request with error code TaskIdSameAsJobPreparationTask; if - * you are calling the REST API directly, the HTTP status code is 409 (Conflict). - */ - id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ - containerSettings?: TaskContainerSettingsOutput; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ - resourceFiles?: Array; - /** A list of environment variable settings for the Job Preparation Task. */ - environmentSettings?: Array; - /** Execution constraints to apply to a Task. */ - constraints?: TaskConstraintsOutput; - /** - * If true and the Job Preparation Task fails on a Node, the Batch service retries - * the Job Preparation Task up to its maximum retry count (as specified in the - * constraints element). If the Task has still not completed successfully after - * all retries, then the Batch service will not schedule Tasks of the Job to the - * Node. The Node remains active and eligible to run Tasks of other Jobs. If - * false, the Batch service will not wait for the Job Preparation Task to - * complete. In this case, other Tasks of the Job can start executing on the - * Compute Node while the Job Preparation Task is still running; and even if the - * Job Preparation Task fails, new Tasks will continue to be scheduled on the - * Compute Node. The default value is true. - */ - waitForSuccess?: boolean; - /** - * If omitted, the Task runs as a non-administrative user unique to the Task on - * Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux - * Compute Nodes. - */ - userIdentity?: UserIdentityOutput; - /** - * The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - * the Job Preparation Task did not complete (e.g. because the reboot occurred - * while the Task was running). 
Therefore, you should always write a Job - * Preparation Task to be idempotent and to behave correctly if run multiple - * times. The default value is true. - */ - rerunOnNodeRebootAfterSuccess?: boolean; -} - -/** - * The Job Release Task runs when the Job ends, because of one of the following: - * The user calls the Terminate Job API, or the Delete Job API while the Job is - * still active, the Job's maximum wall clock time constraint is reached, and the - * Job is still active, or the Job's Job Manager Task completed, and the Job is - * configured to terminate when the Job Manager completes. The Job Release Task - * runs on each Node where Tasks of the Job have run and the Job Preparation Task - * ran and completed. If you reimage a Node after it has run the Job Preparation - * Task, and the Job ends without any further Tasks of the Job running on that - * Node (and hence the Job Preparation Task does not re-run), then the Job Release - * Task does not run on that Compute Node. If a Node reboots while the Job Release - * Task is still running, the Job Release Task runs again when the Compute Node - * starts up. The Job is not marked as complete until all Job Release Tasks have - * completed. The Job Release Task runs in the background. It does not occupy a - * scheduling slot; that is, it does not count towards the taskSlotsPerNode limit - * specified on the Pool. - */ -export interface JobReleaseTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. If you do not - * specify this property, the Batch service assigns a default value of - * 'jobrelease'. No other Task in the Job can have the same ID as the Job Release - * Task. If you try to submit a Task with the same id, the Batch service rejects - * the request with error code TaskIdSameAsJobReleaseTask; if you are calling the - * REST API directly, the HTTP status code is 409 (Conflict). - */ - id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ - containerSettings?: TaskContainerSettingsOutput; - /** Files listed under this element are located in the Task's working directory. */ - resourceFiles?: Array; - /** A list of environment variable settings for the Job Release Task. */ - environmentSettings?: Array; - /** - * The maximum elapsed time that the Job Release Task may run on a given Compute - * Node, measured from the time the Task starts. 
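To make the preparation/release pairing above concrete, a hypothetical sketch: preparation stages shared inputs before any Job Task runs on a Node, and release cleans them up after the Job ends. The command lines and the AZ_BATCH_NODE_SHARED_DIR variable (the shared location mentioned above) are illustrative assumptions:

```ts
const jobPreparationTask = {
  commandLine: "/bin/sh -c 'cp -r inputs \"$AZ_BATCH_NODE_SHARED_DIR\"'",
  waitForSuccess: true,                // no Job Tasks on the Node until this succeeds
  rerunOnNodeRebootAfterSuccess: true, // so write it to be idempotent
};

const jobReleaseTask = {
  commandLine: "/bin/sh -c 'rm -rf \"$AZ_BATCH_NODE_SHARED_DIR/inputs\"'",
  maxWallClockTime: "PT10M",           // must stay within the 15-minute maximum
};
```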
If the Task does not complete - * within the time limit, the Batch service terminates it. The default value is 15 - * minutes. You may not specify a timeout longer than 15 minutes. If you do, the - * Batch service rejects it with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). - */ - maxWallClockTime?: string; - /** - * The default is 7 days, i.e. the Task directory will be retained for 7 days - * unless the Compute Node is removed or the Job is deleted. - */ - retentionTime?: string; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ - userIdentity?: UserIdentityOutput; -} - -/** Specifies how a Job should be assigned to a Pool. */ -export interface PoolInformationOutput { - /** - * You must ensure that the Pool referenced by this property exists. If the Pool - * does not exist at the time the Batch service tries to schedule a Job, no Tasks - * for the Job will run until you create a Pool with that id. Note that the Batch - * service will not reject the Job request; it will simply not run Tasks until the - * Pool exists. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ - poolId?: string; - /** - * If auto Pool creation fails, the Batch service moves the Job to a completed - * state, and the Pool creation error is set in the Job's scheduling error - * property. The Batch service manages the lifetime (both creation and, unless - * keepAlive is specified, deletion) of the auto Pool. Any user actions that - * affect the lifetime of the auto Pool while the Job is active will result in - * unexpected behavior. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ - autoPoolSpecification?: AutoPoolSpecificationOutput; -} - -/** - * Specifies characteristics for a temporary 'auto pool'. The Batch service will - * create this auto Pool when the Job is submitted. - */ -export interface AutoPoolSpecificationOutput { - /** - * The Batch service assigns each auto Pool a unique identifier on creation. To - * distinguish between Pools created for different purposes, you can specify this - * element to add a prefix to the ID that is assigned. The prefix can be up to 20 - * characters long. - */ - autoPoolIdPrefix?: string; - /** - * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule - * are assigned to Pools. - * - * Possible values: jobschedule, job - */ - poolLifetimeOption: string; - /** - * If false, the Batch service deletes the Pool once its lifetime (as determined - * by the poolLifetimeOption setting) expires; that is, when the Job or Job - * Schedule completes. If true, the Batch service does not delete the Pool - * automatically. It is up to the user to delete auto Pools created with this - * option. - */ - keepAlive?: boolean; - /** Specification for creating a new Pool. */ - pool?: PoolSpecificationOutput; -} - -/** Specification for creating a new Pool. */ -export interface PoolSpecificationOutput { - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ - displayName?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - */ - vmSize: string; - /** - * This property must be specified if the Pool needs to be created with Azure PaaS - * VMs. 
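poolId and autoPoolSpecification are mutually exclusive, so a Job names exactly one of them. Both shapes, sketched with placeholder values:

```ts
// Target an existing Pool by ID (no Tasks run until a Pool with that id exists).
const poolInfoExisting = { poolId: "my-existing-pool" };

// Or let Batch create a temporary auto Pool for the Job.
const poolInfoAuto = {
  autoPoolSpecification: {
    autoPoolIdPrefix: "nightly", // at most 20 characters
    poolLifetimeOption: "job",   // "jobschedule" | "job"
    keepAlive: false,            // delete the Pool when its lifetime expires
    pool: { vmSize: "standard_d2s_v3" },
  },
};
```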
This property and virtualMachineConfiguration are mutually exclusive and - * one of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). This property cannot be specified if the - * Batch Account was created with its poolAllocationMode property set to - * 'UserSubscription'. - */ - cloudServiceConfiguration?: CloudServiceConfigurationOutput; - /** - * This property must be specified if the Pool needs to be created with Azure IaaS - * VMs. This property and cloudServiceConfiguration are mutually exclusive and one - * of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). - */ - virtualMachineConfiguration?: VirtualMachineConfigurationOutput; - /** - * The default value is 1. The maximum value is the smaller of 4 times the number - * of cores of the vmSize of the pool or 256. - */ - taskSlotsPerNode?: number; - /** If not specified, the default is spread. */ - taskSchedulingPolicy?: TaskSchedulingPolicyOutput; - /** - * This timeout applies only to manual scaling; it has no effect when - * enableAutoScale is set to true. The default value is 15 minutes. The minimum - * value is 5 minutes. If you specify a value less than 5 minutes, the Batch - * service rejects the request with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). - */ - resizeTimeout?: string; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. - */ - targetDedicatedNodes?: number; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. - */ - targetLowPriorityNodes?: number; - /** - * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must - * be specified. If true, the autoScaleFormula element is required. The Pool - * automatically resizes according to the formula. The default value is false. - */ - enableAutoScale?: boolean; - /** - * This property must not be specified if enableAutoScale is set to false. It is - * required if enableAutoScale is set to true. The formula is checked for validity - * before the Pool is created. If the formula is not valid, the Batch service - * rejects the request with detailed error information. - */ - autoScaleFormula?: string; - /** - * The default value is 15 minutes. The minimum and maximum value are 5 minutes - * and 168 hours respectively. If you specify a value less than 5 minutes or - * greater than 168 hours, the Batch service rejects the request with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). - */ - autoScaleEvaluationInterval?: string; - /** - * Enabling inter-node communication limits the maximum size of the Pool due to - * deployment restrictions on the Compute Nodes of the Pool. This may result in - * the Pool not reaching its desired size. The default value is false. - */ - enableInterNodeCommunication?: boolean; - /** The network configuration for a Pool. 
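A sketch of the autoscale branch of a Pool specification as constrained above: with enableAutoScale set to true, the target node counts are omitted and a formula plus an evaluation interval (between 5 minutes and 168 hours) are supplied instead. The formula body is a trivial illustrative assignment, not a recommended policy:

```ts
const poolSpecification = {
  vmSize: "standard_d2s_v3",
  enableAutoScale: true,
  autoScaleFormula: "$TargetDedicatedNodes = 2;", // validated by the service before Pool creation
  autoScaleEvaluationInterval: "PT15M",           // default 15 minutes; min PT5M, max PT168H
};
```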
*/ - networkConfiguration?: NetworkConfigurationOutput; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ - startTask?: StartTaskOutput; - /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. - */ - certificateReferences?: Array; - /** - * When creating a pool, the package's application ID must be fully qualified - * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - * Changes to Package references affect all new Nodes joining the Pool, but do not - * affect Compute Nodes that are already in the Pool until they are rebooted or - * reimaged. There is a maximum of 10 Package references on any given Pool. - */ - applicationPackageReferences?: Array; - /** - * The list of application licenses must be a subset of available Batch service - * application licenses. If a license is requested which is not supported, Pool - * creation will fail. The permitted licenses available on the Pool are 'maya', - * 'vray', '3dsmax', 'arnold'. An additional charge applies for each application - * license added to the Pool. - */ - applicationLicenses?: string[]; - /** The list of user Accounts to be created on each Compute Node in the Pool. */ - userAccounts?: Array; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ - metadata?: Array; - /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ - mountConfiguration?: Array; - /** - * If omitted, the default value is Default. - * - * Possible values: default, classic, simplified - */ - targetNodeCommunicationMode?: string; -} - -/** The network configuration for the Job. */ -export interface JobNetworkConfigurationOutput { - /** - * The virtual network must be in the same region and subscription as the Azure - * Batch Account. The specified subnet should have enough free IP addresses to - * accommodate the number of Compute Nodes which will run Tasks from the Job. This - * can be up to the number of Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' - * service principal must have the 'Classic Virtual Machine Contributor' - * Role-Based Access Control (RBAC) role for the specified VNet so that Azure - * Batch service can schedule Tasks on the Nodes. This can be verified by checking - * if the specified VNet has any associated Network Security Groups (NSG). If - * communication to the Nodes in the specified subnet is denied by an NSG, then - * the Batch service will set the state of the Compute Nodes to unusable. This is - * of the form - * /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - * If the specified VNet has any associated Network Security Groups (NSG), then a - * few reserved system ports must be enabled for inbound communication from the - * Azure Batch service. For Pools created with a Virtual Machine configuration, - * enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for - * Windows. Port 443 is also required to be open for outbound connections for - * communications to Azure Storage. For more details see: - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - */ - subnetId: string; -} - -/** Contains information about the execution of a Job in the Azure Batch service. */ -export interface JobExecutionInformationOutput { - /** This is the time at which the Job was created. */ - startTime: string; - /** This property is set only if the Job is in the completed state. */ - endTime?: string; - /** - * This element contains the actual Pool where the Job is assigned. When you get - * Job details from the service, they also contain a poolInfo element, which - * contains the Pool configuration data from when the Job was added or updated. - * That poolInfo element may also contain a poolId element. If it does, the two - * IDs are the same. If it does not, it means the Job ran on an auto Pool, and - * this property contains the ID of that auto Pool. - */ - poolId?: string; - /** This property is not set if there was no error starting the Job. */ - schedulingError?: JobSchedulingErrorOutput; - /** - * This property is set only if the Job is in the completed state. If the Batch - * service terminates the Job, it sets the reason as follows: JMComplete - the Job - * Manager Task completed, and killJobOnCompletion was set to true. - * MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. - * TerminateJobSchedule - the Job ran as part of a schedule, and the schedule - * terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to - * terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's - * onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in - * the Job failed with an exit condition that specified a jobAction of - * terminatejob. Any other string is a user-defined reason specified in a call to - * the 'Terminate a Job' operation. - */ - terminateReason?: string; -} - -/** An error encountered by the Batch service when scheduling a Job. */ -export interface JobSchedulingErrorOutput { - /** - * The category of the error. - * - * Possible values: usererror, servererror - */ - category: string; - /** - * An identifier for the Job scheduling error. Codes are invariant and are - * intended to be consumed programmatically. - */ - code?: string; - /** - * A message describing the Job scheduling error, intended to be suitable for - * display in a user interface. 
- */ - message?: string; - /** A list of additional error details related to the scheduling error. */ - details?: Array; -} - -/** The result of listing the Jobs in an Account. */ -export interface BatchJobListResultOutput { - /** The list of Jobs. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** - * The result of listing the status of the Job Preparation and Job Release Tasks - * for a Job. - */ -export interface BatchJobListPreparationAndReleaseTaskStatusResultOutput { - /** A list of Job Preparation and Job Release Task execution information. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** The status of the Job Preparation and Job Release Tasks on a Compute Node. */ -export interface JobPreparationAndReleaseTaskExecutionInformationOutput { - /** The ID of the Pool containing the Compute Node to which this entry refers. */ - poolId?: string; - /** The ID of the Compute Node to which this entry refers. */ - nodeId?: string; - /** The URL of the Compute Node to which this entry refers. */ - nodeUrl?: string; - /** - * Contains information about the execution of a Job Preparation Task on a Compute - * Node. - */ - jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformationOutput; - /** This property is set only if the Job Release Task has run on the Compute Node. */ - jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformationOutput; -} - -/** - * Contains information about the execution of a Job Preparation Task on a Compute - * Node. - */ -export interface JobPreparationTaskExecutionInformationOutput { - /** - * If the Task has been restarted or retried, this is the most recent time at - * which the Task started running. - */ - startTime: string; - /** This property is set only if the Task is in the Completed state. */ - endTime?: string; - /** - * The current state of the Job Preparation Task on the Compute Node. - * - * Possible values: running, completed - */ - state: string; - /** - * The root directory of the Job Preparation Task on the Compute Node. You can use - * this path to retrieve files created by the Task, such as log files. - */ - taskRootDirectory?: string; - /** The URL to the root directory of the Job Preparation Task on the Compute Node. */ - taskRootDirectoryUrl?: string; - /** - * This parameter is returned only if the Task is in the completed state. The exit - * code for a process reflects the specific convention implemented by the - * application developer for that process. If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. Note that the exit code may also be generated by the - * Compute Node operating system, such as when a process is forcibly terminated. - */ - exitCode?: number; - /** This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ - failureInfo?: TaskFailureInformationOutput; - /** - * Task application failures (non-zero exit code) are retried, pre-processing - * errors (the Task could not be run) and file upload errors are not retried. The - * Batch service will retry the Task up to the limit specified by the constraints. - */ - retryCount: number; - /** - * This property is set only if the Task was retried (i.e. retryCount is nonzero). 
- * If present, this is typically the same as startTime, but may be different if - * the Task has been restarted for reasons other than retry; for example, if the - * Compute Node was rebooted during a retry, then the startTime is updated but the - * lastRetryTime is not. - */ - lastRetryTime?: string; - /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. - * - * Possible values: success, failure - */ - result?: string; -} - -/** Contains information about the container which a Task is executing. */ -export interface TaskContainerExecutionInformationOutput { - /** The ID of the container. */ - containerId?: string; - /** - * This is the state of the container according to the Docker service. It is - * equivalent to the status field returned by "docker inspect". - */ - state?: string; - /** - * This is the detailed error string from the Docker service, if available. It is - * equivalent to the error field returned by "docker inspect". - */ - error?: string; -} - -/** Information about a Task failure. */ -export interface TaskFailureInformationOutput { - /** - * The category of the error. - * - * Possible values: usererror, servererror - */ - category: string; - /** - * An identifier for the Task error. Codes are invariant and are intended to be - * consumed programmatically. - */ - code?: string; - /** - * A message describing the Task error, intended to be suitable for display in a - * user interface. - */ - message?: string; - /** A list of additional details related to the error. */ - details?: Array; -} - -/** - * Contains information about the execution of a Job Release Task on a Compute - * Node. - */ -export interface JobReleaseTaskExecutionInformationOutput { - /** - * If the Task has been restarted or retried, this is the most recent time at - * which the Task started running. - */ - startTime: string; - /** This property is set only if the Task is in the Completed state. */ - endTime?: string; - /** - * The current state of the Job Release Task on the Compute Node. - * - * Possible values: running, completed - */ - state: string; - /** - * The root directory of the Job Release Task on the Compute Node. You can use - * this path to retrieve files created by the Task, such as log files. - */ - taskRootDirectory?: string; - /** The URL to the root directory of the Job Release Task on the Compute Node. */ - taskRootDirectoryUrl?: string; - /** - * This parameter is returned only if the Task is in the completed state. The exit - * code for a process reflects the specific convention implemented by the - * application developer for that process. If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. Note that the exit code may also be generated by the - * Compute Node operating system, such as when a process is forcibly terminated. - */ - exitCode?: number; - /** This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ - failureInfo?: TaskFailureInformationOutput; - /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. - * - * Possible values: success, failure - */ - result?: string; -} - -/** The Task and TaskSlot counts for a Job. 
*/ -export interface TaskCountsResultOutput { - /** The Task counts for a Job. */ - readonly taskCounts: TaskCountsOutput; - /** The TaskSlot counts for a Job. */ - taskSlotCounts: TaskSlotCountsOutput; -} - -/** The Task counts for a Job. */ -export interface TaskCountsOutput { - /** The number of Tasks in the active state. */ - active: number; - /** The number of Tasks in the running or preparing state. */ - running: number; - /** The number of Tasks in the completed state. */ - completed: number; - /** - * The number of Tasks which succeeded. A Task succeeds if its result (found in - * the executionInfo property) is 'success'. - */ - succeeded: number; - /** - * The number of Tasks which failed. A Task fails if its result (found in the - * executionInfo property) is 'failure'. - */ - failed: number; -} - -/** The TaskSlot counts for a Job. */ -export interface TaskSlotCountsOutput { - /** The number of TaskSlots for active Tasks. */ - active: number; - /** The number of TaskSlots for running Tasks. */ - running: number; - /** The number of TaskSlots for completed Tasks. */ - completed: number; - /** The number of TaskSlots for succeeded Tasks. */ - succeeded: number; - /** The number of TaskSlots for failed Tasks. */ - failed: number; -} - -/** - * A Certificate that can be installed on Compute Nodes and can be used to - * authenticate operations on the machine. - */ -export interface CertificateOutput { - /** - * The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex - * digits. - */ - thumbprint?: string; - /** The algorithm used to derive the thumbprint. */ - thumbprintAlgorithm?: string; - /** The URL of the Certificate. */ - readonly url?: string; - /** - * The state of the Certificate. - * - * Possible values: active, deleting, deletefailed - */ - readonly state?: string; - /** The time at which the Certificate entered its current state. */ - readonly stateTransitionTime?: string; - /** - * This property is not set if the Certificate is in its initial active state. - * - * Possible values: active, deleting, deletefailed - */ - readonly previousState?: string; - /** This property is not set if the Certificate is in its initial Active state. */ - readonly previousStateTransitionTime?: string; - /** The public part of the Certificate as a base-64 encoded .cer file. */ - readonly publicData?: string; - /** This property is set only if the Certificate is in the DeleteFailed state. */ - readonly deleteCertificateError?: DeleteCertificateErrorOutput; - /** The base64-encoded contents of the Certificate. The maximum size is 10KB. */ - data?: string; - /** - * The format of the Certificate data. - * - * Possible values: pfx, cer - */ - certificateFormat?: string; - /** This must be omitted if the Certificate format is cer. */ - password?: string; -} - -/** An error encountered by the Batch service when deleting a Certificate. */ -export interface DeleteCertificateErrorOutput { - /** - * An identifier for the Certificate deletion error. Codes are invariant and are - * intended to be consumed programmatically. - */ - code?: string; - /** - * A message describing the Certificate deletion error, intended to be suitable - * for display in a user interface. - */ - message?: string; - /** - * This list includes details such as the active Pools and Compute Nodes - * referencing this Certificate. However, if a large number of resources reference - * the Certificate, the list contains only about the first hundred. 
- */ - values?: Array; -} - -/** The result of listing the Certificates in the Account. */ -export interface CertificateListResultOutput { - /** The list of Certificates. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** - * The result of listing the files on a Compute Node, or the files associated with - * a Task on a Compute Node. - */ -export interface NodeFileListResultOutput { - /** The list of files. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** Information about a file or directory on a Compute Node. */ -export interface NodeFileOutput { - /** The file path. */ - name?: string; - /** The URL of the file. */ - url?: string; - /** Whether the object represents a directory. */ - isDirectory?: boolean; - /** The properties of a file on a Compute Node. */ - properties?: FilePropertiesOutput; -} - -/** The properties of a file on a Compute Node. */ -export interface FilePropertiesOutput { - /** The creation time is not returned for files on Linux Compute Nodes. */ - creationTime?: string; - /** The time at which the file was last modified. */ - lastModified: string; - /** The length of the file. */ - contentLength: number; - /** The content type of the file. */ - contentType?: string; - /** The file mode is returned only for files on Linux Compute Nodes. */ - fileMode?: string; -} - -/** - * A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a - * specification used to create each Job. - */ -export interface BatchJobScheduleOutput { - /** A string that uniquely identifies the schedule within the Account. */ - id?: string; - /** The display name for the schedule. */ - displayName?: string; - /** The URL of the Job Schedule. */ - readonly url?: string; - /** - * This is an opaque string. You can use it to detect whether the Job Schedule has - * changed between requests. In particular, you can be pass the ETag with an - * Update Job Schedule request to specify that your changes should take effect - * only if nobody else has modified the schedule in the meantime. - */ - readonly eTag?: string; - /** - * This is the last time at which the schedule level data, such as the Job - * specification or recurrence information, changed. It does not factor in - * job-level changes such as new Jobs being created or Jobs changing state. - */ - readonly lastModified?: string; - /** The creation time of the Job Schedule. */ - readonly creationTime?: string; - /** - * The state of the Job Schedule. - * - * Possible values: active, completed, disabled, terminating, deleting - */ - readonly state?: string; - /** The time at which the Job Schedule entered the current state. */ - readonly stateTransitionTime?: string; - /** - * This property is not present if the Job Schedule is in its initial active state. - * - * Possible values: active, completed, disabled, terminating, deleting - */ - readonly previousState?: string; - /** This property is not present if the Job Schedule is in its initial active state. */ - readonly previousStateTransitionTime?: string; - /** - * All times are fixed respective to UTC and are not impacted by daylight saving - * time. - */ - schedule?: ScheduleOutput; - /** Specifies details of the Jobs to be created on a schedule. */ - jobSpecification?: JobSpecificationOutput; - /** - * Contains information about Jobs that have been and will be run under a Job - * Schedule. 
- */ - readonly executionInfo?: JobScheduleExecutionInformationOutput; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ - metadata?: Array; - /** Resource usage statistics for a Job Schedule. */ - readonly stats?: JobScheduleStatisticsOutput; -} - -/** - * The schedule according to which Jobs will be created. All times are fixed - * respective to UTC and are not impacted by daylight saving time. - */ -export interface ScheduleOutput { - /** - * If you do not specify a doNotRunUntil time, the schedule becomes ready to - * create Jobs immediately. - */ - doNotRunUntil?: string; - /** - * If you do not specify a doNotRunAfter time, and you are creating a recurring - * Job Schedule, the Job Schedule will remain active until you explicitly - * terminate it. - */ - doNotRunAfter?: string; - /** - * If a Job is not created within the startWindow interval, then the 'opportunity' - * is lost; no Job will be created until the next recurrence of the schedule. If - * the schedule is recurring, and the startWindow is longer than the recurrence - * interval, then this is equivalent to an infinite startWindow, because the Job - * that is 'due' in one recurrenceInterval is not carried forward into the next - * recurrence interval. The default is infinite. The minimum value is 1 minute. If - * you specify a lower value, the Batch service rejects the schedule with an - * error; if you are calling the REST API directly, the HTTP status code is 400 - * (Bad Request). - */ - startWindow?: string; - /** - * Because a Job Schedule can have at most one active Job under it at any given - * time, if it is time to create a new Job under a Job Schedule, but the previous - * Job is still running, the Batch service will not create the new Job until the - * previous Job finishes. If the previous Job does not finish within the - * startWindow period of the new recurrenceInterval, then no new Job will be - * scheduled for that interval. For recurring Jobs, you should normally specify a - * jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you - * will need an external process to monitor when Jobs are created, add Tasks to - * the Jobs and terminate the Jobs ready for the next recurrence. The default is - * that the schedule does not recur: one Job is created, within the startWindow - * after the doNotRunUntil time, and the schedule is complete as soon as that Job - * finishes. The minimum value is 1 minute. If you specify a lower value, the - * Batch service rejects the schedule with an error; if you are calling the REST - * API directly, the HTTP status code is 400 (Bad Request). - */ - recurrenceInterval?: string; -} - -/** Specifies details of the Jobs to be created on a schedule. */ -export interface JobSpecificationOutput { - /** - * Priority values can range from -1000 to 1000, with -1000 being the lowest - * priority and 1000 being the highest priority. The default value is 0. This - * priority is used as the default for all Jobs under the Job Schedule. You can - * update a Job's priority after it has been created using by using the update Job - * API. - */ - priority?: number; - /** - * If the value is set to True, other high priority jobs submitted to the system - * will take precedence and will be able requeue tasks from this job. You can - * update a job's allowTaskPreemption after it has been created using the update - * job API. 
- */ - allowTaskPreemption?: boolean; - /** - * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not - * specified, the default value is -1, which means there's no limit to the number - * of tasks that can be run at once. You can update a job's maxParallelTasks after - * it has been created using the update job API. - */ - maxParallelTasks?: number; - /** - * The name need not be unique and can contain any Unicode characters up to a - * maximum length of 1024. - */ - displayName?: string; - /** - * Whether Tasks in the Job can define dependencies on each other. The default is - * false. - */ - usesTaskDependencies?: boolean; - /** - * Note that if a Job contains no Tasks, then all Tasks are considered complete. - * This option is therefore most commonly used with a Job Manager task; if you - * want to use automatic Job termination without a Job Manager, you should - * initially set onAllTasksComplete to noaction and update the Job properties to - * set onAllTasksComplete to terminatejob once you have finished adding Tasks. The - * default is noaction. - * - * Possible values: noaction, terminatejob - */ - onAllTasksComplete?: string; - /** - * The default is noaction. - * - * Possible values: noaction, performexitoptionsjobaction - */ - onTaskFailure?: string; - /** The network configuration for the Job. */ - networkConfiguration?: JobNetworkConfigurationOutput; - /** The execution constraints for a Job. */ - constraints?: JobConstraintsOutput; - /** - * If the Job does not specify a Job Manager Task, the user must explicitly add - * Tasks to the Job using the Task API. If the Job does specify a Job Manager - * Task, the Batch service creates the Job Manager Task when the Job is created, - * and will try to schedule the Job Manager Task before scheduling other Tasks in - * the Job. - */ - jobManagerTask?: JobManagerTaskOutput; - /** - * If a Job has a Job Preparation Task, the Batch service will run the Job - * Preparation Task on a Node before starting any Tasks of that Job on that - * Compute Node. - */ - jobPreparationTask?: JobPreparationTaskOutput; - /** - * The primary purpose of the Job Release Task is to undo changes to Nodes made by - * the Job Preparation Task. Example activities include deleting local files, or - * shutting down services that were started as part of Job preparation. A Job - * Release Task cannot be specified without also specifying a Job Preparation Task - * for the Job. The Batch service runs the Job Release Task on the Compute Nodes - * that have run the Job Preparation Task. - */ - jobReleaseTask?: JobReleaseTaskOutput; - /** - * Individual Tasks can override an environment setting specified here by - * specifying the same setting name with a different value. - */ - commonEnvironmentSettings?: Array; - /** Specifies how a Job should be assigned to a Pool. */ - poolInfo: PoolInformationOutput; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ - metadata?: Array; -} - -/** - * Contains information about Jobs that have been and will be run under a Job - * Schedule. - */ -export interface JobScheduleExecutionInformationOutput { - /** - * This property is meaningful only if the schedule is in the active state when - * the time comes around. For example, if the schedule is disabled, no Job will be - * created at nextRunTime unless the Job is enabled before then. 
- */ - nextRunTime?: string; - /** - * This property is present only if the at least one Job has run under the - * schedule. - */ - recentJob?: RecentJobOutput; - /** This property is set only if the Job Schedule is in the completed state. */ - endTime?: string; -} - -/** Information about the most recent Job to run under the Job Schedule. */ -export interface RecentJobOutput { - /** The ID of the Job. */ - id?: string; - /** The URL of the Job. */ - url?: string; -} - -/** Resource usage statistics for a Job Schedule. */ -export interface JobScheduleStatisticsOutput { - /** The URL of the statistics. */ - url: string; - /** The start time of the time range covered by the statistics. */ - startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in all Jobs created under the schedule. - */ - userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in all Jobs created under the schedule. - */ - kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on a - * Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If a Task was retried, this - * includes the wall clock time of all the Task retries. - */ - wallClockTime: string; - /** - * The total number of disk read operations made by all Tasks in all Jobs created - * under the schedule. - */ - readIOps: number; - /** - * The total number of disk write operations made by all Tasks in all Jobs created - * under the schedule. - */ - writeIOps: number; - /** - * The total gibibytes read from disk by all Tasks in all Jobs created under the - * schedule. - */ - readIOGiB: number; - /** - * The total gibibytes written to disk by all Tasks in all Jobs created under the - * schedule. - */ - writeIOGiB: number; - /** - * The total number of Tasks successfully completed during the given time range in - * Jobs created under the schedule. A Task completes successfully if it returns - * exit code 0. - */ - numSucceededTasks: number; - /** - * The total number of Tasks that failed during the given time range in Jobs - * created under the schedule. A Task fails if it exhausts its maximum retry count - * without returning exit code 0. - */ - numFailedTasks: number; - /** - * The total number of retries during the given time range on all Tasks in all - * Jobs created under the schedule. - */ - numTaskRetries: number; - /** - * This value is only reported in the Account lifetime statistics; it is not - * included in the Job statistics. - */ - waitTime: string; -} - -/** The result of listing the Job Schedules in an Account. */ -export interface BatchJobScheduleListResultOutput { - /** The list of Job Schedules. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. 
Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. - */ -export interface BatchTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores, and cannot contain more than 64 characters. - */ - id?: string; - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ - displayName?: string; - /** The URL of the Task. */ - readonly url?: string; - /** - * This is an opaque string. You can use it to detect whether the Task has changed - * between requests. In particular, you can be pass the ETag when updating a Task - * to specify that your changes should take effect only if nobody else has - * modified the Task in the meantime. - */ - readonly eTag?: string; - /** The last modified time of the Task. */ - readonly lastModified?: string; - /** The creation time of the Task. */ - readonly creationTime?: string; - /** How the Batch service should respond when the Task completes. */ - exitConditions?: ExitConditionsOutput; - /** - * The state of the Task. - * - * Possible values: active, preparing, running, completed - */ - readonly state?: string; - /** The time at which the Task entered its current state. */ - readonly stateTransitionTime?: string; - /** - * This property is not set if the Task is in its initial Active state. - * - * Possible values: active, preparing, running, completed - */ - readonly previousState?: string; - /** This property is not set if the Task is in its initial Active state. */ - readonly previousStateTransitionTime?: string; - /** - * For multi-instance Tasks, the command line is executed as the primary Task, - * after the primary Task and all subtasks have finished executing the - * coordination command line. The command line does not run under a shell, and - * therefore cannot take advantage of shell features such as environment variable - * expansion. If you want to take advantage of such features, you should invoke - * the shell in the command line, for example using "cmd /c MyCommand" in - * Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - * file paths, it should use a relative path (relative to the Task working - * directory), or use the Batch provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine?: string; - /** - * If the Pool that will run this Task has containerConfiguration set, this must - * be set as well. If the Pool that will run this Task doesn't have - * containerConfiguration set, this must not be set. When this is specified, all - * directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure - * Batch directories on the node) are mapped into the container, all Task - * environment variables are mapped into the container, and the Task command line - * is executed in the container. Files produced in the container outside of - * AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that - * Batch file APIs will not be able to access those files. 
- */ - containerSettings?: TaskContainerSettingsOutput; - /** - * For multi-instance Tasks, the resource files will only be downloaded to the - * Compute Node on which the primary Task is executed. There is a maximum size for - * the list of resource files. When the max size is exceeded, the request will - * fail and the response error code will be RequestEntityTooLarge. If this occurs, - * the collection of ResourceFiles must be reduced in size. This can be achieved - * using .zip files, Application Packages, or Docker Containers. - */ - resourceFiles?: Array; - /** - * For multi-instance Tasks, the files will only be uploaded from the Compute Node - * on which the primary Task is executed. - */ - outputFiles?: Array; - /** A list of environment variable settings for the Task. */ - environmentSettings?: Array; - /** - * A locality hint that can be used by the Batch service to select a Compute Node - * on which to start a Task. - */ - affinityInfo?: AffinityInformationOutput; - /** Execution constraints to apply to a Task. */ - constraints?: TaskConstraintsOutput; - /** - * The default is 1. A Task can only be scheduled to run on a compute node if the - * node has enough free scheduling slots available. For multi-instance Tasks, this - * must be 1. - */ - requiredSlots?: number; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ - userIdentity?: UserIdentityOutput; - /** Information about the execution of a Task. */ - readonly executionInfo?: TaskExecutionInformationOutput; - /** Information about the Compute Node on which a Task ran. */ - readonly nodeInfo?: ComputeNodeInformationOutput; - /** - * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, - * if any of the subtasks fail (for example due to exiting with a non-zero exit - * code) the entire multi-instance Task fails. The multi-instance Task is then - * terminated and retried, up to its retry limit. - */ - multiInstanceSettings?: MultiInstanceSettingsOutput; - /** Resource usage statistics for a Task. */ - readonly stats?: TaskStatisticsOutput; - /** - * This Task will not be scheduled until all Tasks that it depends on have - * completed successfully. If any of those Tasks fail and exhaust their retry - * counts, this Task will never be scheduled. - */ - dependsOn?: TaskDependenciesOutput; - /** - * Application packages are downloaded and deployed to a shared directory, not the - * Task working directory. Therefore, if a referenced package is already on the - * Node, and is up to date, then it is not re-downloaded; the existing copy on the - * Compute Node is used. If a referenced Package cannot be installed, for example - * because the package has been deleted or because download failed, the Task - * fails. - */ - applicationPackageReferences?: Array; - /** - * If this property is set, the Batch service provides the Task with an - * authentication token which can be used to authenticate Batch service operations - * without requiring an Account access key. The token is provided via the - * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the - * Task can carry out using the token depend on the settings. For example, a Task - * can request Job permissions in order to add other Tasks to the Job, or check - * the status of the Job or of other Tasks under the Job. - */ - authenticationTokenSettings?: AuthenticationTokenSettingsOutput; -} - -/** Specifies how the Batch service should respond when the Task completes. 
*/ -export interface ExitConditionsOutput { - /** - * A list of individual Task exit codes and how the Batch service should respond - * to them. - */ - exitCodes?: Array; - /** - * A list of Task exit code ranges and how the Batch service should respond to - * them. - */ - exitCodeRanges?: Array; - /** Specifies how the Batch service responds to a particular exit condition. */ - preProcessingError?: ExitOptionsOutput; - /** - * If the Task exited with an exit code that was specified via exitCodes or - * exitCodeRanges, and then encountered a file upload error, then the action - * specified by the exit code takes precedence. - */ - fileUploadError?: ExitOptionsOutput; - /** - * This value is used if the Task exits with any nonzero exit code not listed in - * the exitCodes or exitCodeRanges collection, with a pre-processing error if the - * preProcessingError property is not present, or with a file upload error if the - * fileUploadError property is not present. If you want non-default behavior on - * exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges - * collection. - */ - default?: ExitOptionsOutput; -} - -/** - * How the Batch service should respond if a Task exits with a particular exit - * code. - */ -export interface ExitCodeMappingOutput { - /** A process exit code. */ - code: number; - /** Specifies how the Batch service responds to a particular exit condition. */ - exitOptions: ExitOptionsOutput; -} - -/** Specifies how the Batch service responds to a particular exit condition. */ -export interface ExitOptionsOutput { - /** - * The default is none for exit code 0 and terminate for all other exit - * conditions. If the Job's onTaskFailed property is noaction, then specifying - * this property returns an error and the add Task request fails with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). - * - * Possible values: none, disable, terminate - */ - jobAction?: string; - /** - * Possible values are 'satisfy' (allowing dependent tasks to progress) and - * 'block' (dependent tasks continue to wait). Batch does not yet support - * cancellation of dependent tasks. - * - * Possible values: satisfy, block - */ - dependencyAction?: string; -} - -/** - * A range of exit codes and how the Batch service should respond to exit codes - * within that range. - */ -export interface ExitCodeRangeMappingOutput { - /** The first exit code in the range. */ - start: number; - /** The last exit code in the range. */ - end: number; - /** Specifies how the Batch service responds to a particular exit condition. */ - exitOptions: ExitOptionsOutput; -} - -/** - * A locality hint that can be used by the Batch service to select a Compute Node - * on which to start a Task. - */ -export interface AffinityInformationOutput { - /** - * You can pass the affinityId of a Node to indicate that this Task needs to run - * on that Compute Node. Note that this is just a soft affinity. If the target - * Compute Node is busy or unavailable at the time the Task is scheduled, then the - * Task will be scheduled elsewhere. - */ - affinityId: string; -} - -/** Information about the execution of a Task. */ -export interface TaskExecutionInformationOutput { - /** - * 'Running' corresponds to the running state, so if the Task specifies resource - * files or Packages, then the start time reflects the time at which the Task - * started downloading or deploying these. 
If the Task has been restarted or - * retried, this is the most recent time at which the Task started running. This - * property is present only for Tasks that are in the running or completed state. - */ - startTime?: string; - /** This property is set only if the Task is in the Completed state. */ - endTime?: string; - /** - * This property is set only if the Task is in the completed state. In general, - * the exit code for a process reflects the specific convention implemented by the - * application developer for that process. If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. However, if the Batch service terminates the Task (due - * to timeout, or user termination via the API) you may see an operating - * system-defined exit code. - */ - exitCode?: number; - /** This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ - failureInfo?: TaskFailureInformationOutput; - /** - * Task application failures (non-zero exit code) are retried, pre-processing - * errors (the Task could not be run) and file upload errors are not retried. The - * Batch service will retry the Task up to the limit specified by the constraints. - */ - retryCount: number; - /** - * This element is present only if the Task was retried (i.e. retryCount is - * nonzero). If present, this is typically the same as startTime, but may be - * different if the Task has been restarted for reasons other than retry; for - * example, if the Compute Node was rebooted during a retry, then the startTime is - * updated but the lastRetryTime is not. - */ - lastRetryTime?: string; - /** - * When the user removes Compute Nodes from a Pool (by resizing/shrinking the - * pool) or when the Job is being disabled, the user can specify that running - * Tasks on the Compute Nodes be requeued for execution. This count tracks how - * many times the Task has been requeued for these reasons. - */ - requeueCount: number; - /** This property is set only if the requeueCount is nonzero. */ - lastRequeueTime?: string; - /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. - * - * Possible values: success, failure - */ - result?: string; -} - -/** Information about the Compute Node on which a Task ran. */ -export interface ComputeNodeInformationOutput { - /** - * An identifier for the Node on which the Task ran, which can be passed when - * adding a Task to request that the Task be scheduled on this Compute Node. - */ - affinityId?: string; - /** The URL of the Compute Node on which the Task ran. */ - nodeUrl?: string; - /** The ID of the Pool on which the Task ran. */ - poolId?: string; - /** The ID of the Compute Node on which the Task ran. */ - nodeId?: string; - /** The root directory of the Task on the Compute Node. */ - taskRootDirectory?: string; - /** The URL to the root directory of the Task on the Compute Node. */ - taskRootDirectoryUrl?: string; -} - -/** - * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, - * if any of the subtasks fail (for example due to exiting with a non-zero exit - * code) the entire multi-instance Task fails. The multi-instance Task is then - * terminated and retried, up to its retry limit. 
- */ -export interface MultiInstanceSettingsOutput { - /** If omitted, the default is 1. */ - numberOfInstances?: number; - /** - * A typical coordination command line launches a background service and verifies - * that the service is ready to process inter-node messages. - */ - coordinationCommandLine: string; - /** - * The difference between common resource files and Task resource files is that - * common resource files are downloaded for all subtasks including the primary, - * whereas Task resource files are downloaded only for the primary. Also note that - * these resource files are not downloaded to the Task working directory, but - * instead are downloaded to the Task root directory (one directory above the - * working directory). There is a maximum size for the list of resource files. - * When the max size is exceeded, the request will fail and the response error - * code will be RequestEntityTooLarge. If this occurs, the collection of - * ResourceFiles must be reduced in size. This can be achieved using .zip files, - * Application Packages, or Docker Containers. - */ - commonResourceFiles?: Array; -} - -/** Resource usage statistics for a Task. */ -export interface TaskStatisticsOutput { - /** The URL of the statistics. */ - url: string; - /** The start time of the time range covered by the statistics. */ - startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by the Task. - */ - userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by the Task. - */ - kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on a - * Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If the Task was retried, this - * includes the wall clock time of all the Task retries. - */ - wallClockTime: string; - /** The total number of disk read operations made by the Task. */ - readIOps: number; - /** The total number of disk write operations made by the Task. */ - writeIOps: number; - /** The total gibibytes read from disk by the Task. */ - readIOGiB: number; - /** The total gibibytes written to disk by the Task. */ - writeIOGiB: number; - /** - * The total wait time of the Task. The wait time for a Task is defined as the - * elapsed time between the creation of the Task and the start of Task execution. - * (If the Task is retried due to failures, the wait time is the time to the most - * recent Task execution.) - */ - waitTime: string; -} - -/** - * Specifies any dependencies of a Task. Any Task that is explicitly specified or - * within a dependency range must complete before the dependant Task will be - * scheduled. - */ -export interface TaskDependenciesOutput { - /** - * The taskIds collection is limited to 64000 characters total (i.e. the combined - * length of all Task IDs). If the taskIds collection exceeds the maximum length, - * the Add Task request fails with error code TaskDependencyListTooLong. In this - * case consider using Task ID ranges instead. - */ - taskIds?: string[]; - /** - * The list of Task ID ranges that this Task depends on. All Tasks in all ranges - * must complete successfully before the dependent Task can be scheduled. 
- */ - taskIdRanges?: Array; -} - -/** - * The start and end of the range are inclusive. For example, if a range has start - * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. - */ -export interface TaskIdRangeOutput { - /** The first Task ID in the range. */ - start: number; - /** The last Task ID in the range. */ - end: number; -} - -/** The result of listing the Tasks in a Job. */ -export interface BatchTaskListResultOutput { - /** The list of Tasks. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** The result of adding a collection of Tasks to a Job. */ -export interface TaskAddCollectionResultOutput { - /** The results of the add Task collection operation. */ - value?: Array; -} - -/** Result for a single Task added as part of an add Task collection operation. */ -export interface TaskAddResultOutput { - /** - * The status of the add Task request. - * - * Possible values: success, clienterror, servererror - */ - status: string; - /** The ID of the Task for which this is the result. */ - taskId: string; - /** - * You can use this to detect whether the Task has changed between requests. In - * particular, you can be pass the ETag with an Update Task request to specify - * that your changes should take effect only if nobody else has modified the Job - * in the meantime. - */ - eTag?: string; - /** The last modified time of the Task. */ - lastModified?: string; - /** The URL of the Task, if the Task was successfully added. */ - location?: string; - /** An error response received from the Azure Batch service. */ - error?: BatchErrorOutput; -} - -/** An error response received from the Azure Batch service. */ -export interface BatchErrorOutput { - /** - * An identifier for the error. Codes are invariant and are intended to be - * consumed programmatically. - */ - code?: string; - /** An error message received in an Azure Batch error response. */ - message?: ErrorMessageOutput; - /** A collection of key-value pairs containing additional details about the error. */ - values?: Array; -} - -/** An error message received in an Azure Batch error response. */ -export interface ErrorMessageOutput { - /** The language code of the error message */ - lang?: string; - /** The text of the message. */ - value?: string; -} - -/** An item of additional information included in an Azure Batch error response. */ -export interface BatchErrorDetailOutput { - /** An identifier specifying the meaning of the Value property. */ - key?: string; - /** The additional information included with the error response. */ - value?: string; -} - -/** The result of listing the subtasks of a Task. */ -export interface BatchTaskListSubtasksResultOutput { - /** The list of subtasks. */ - value?: Array; -} - -/** Information about an Azure Batch subtask. */ -export interface SubtaskInformationOutput { - /** The ID of the subtask. */ - id?: number; - /** Information about the Compute Node on which a Task ran. */ - nodeInfo?: ComputeNodeInformationOutput; - /** - * The time at which the subtask started running. If the subtask has been - * restarted or retried, this is the most recent time at which the subtask started - * running. - */ - startTime?: string; - /** This property is set only if the subtask is in the Completed state. */ - endTime?: string; - /** - * This property is set only if the subtask is in the completed state. 
In general, - * the exit code for a process reflects the specific convention implemented by the - * application developer for that process. If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. However, if the Batch service terminates the subtask - * (due to timeout, or user termination via the API) you may see an operating - * system-defined exit code. - */ - exitCode?: number; - /** This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ - failureInfo?: TaskFailureInformationOutput; - /** - * The state of the subtask. - * - * Possible values: preparing, running, completed - */ - state?: string; - /** The time at which the subtask entered its current state. */ - stateTransitionTime?: string; - /** - * This property is not set if the subtask is in its initial running state. - * - * Possible values: preparing, running, completed - */ - previousState?: string; - /** This property is not set if the subtask is in its initial running state. */ - previousStateTransitionTime?: string; - /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. - * - * Possible values: success, failure - */ - result?: string; -} - -/** A Compute Node in the Batch service. */ -export interface ComputeNodeOutput { - /** - * Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a - * Compute Node is removed from a Pool, all of its local files are deleted, and - * the ID is reclaimed and could be reused for new Compute Nodes. - */ - id?: string; - /** The URL of the Compute Node. */ - url?: string; - /** - * The Spot/Low-priority Compute Node has been preempted. Tasks which were running - * on the Compute Node when it was preempted will be rescheduled when another - * Compute Node becomes available. - * - * Possible values: idle, rebooting, reimaging, running, unusable, creating, starting, waitingforstarttask, starttaskfailed, unknown, leavingpool, offline, preempted - */ - state?: string; - /** - * Whether the Compute Node is available for Task scheduling. - * - * Possible values: enabled, disabled - */ - schedulingState?: string; - /** The time at which the Compute Node entered its current state. */ - stateTransitionTime?: string; - /** This property may not be present if the Compute Node state is unusable. */ - lastBootTime?: string; - /** - * This is the time when the Compute Node was initially allocated and doesn't - * change once set. It is not updated when the Compute Node is service healed or - * preempted. - */ - allocationTime?: string; - /** - * Every Compute Node that is added to a Pool is assigned a unique IP address. - * Whenever a Compute Node is removed from a Pool, all of its local files are - * deleted, and the IP address is reclaimed and could be reused for new Compute - * Nodes. - */ - ipAddress?: string; - /** - * Note that this is just a soft affinity. If the target Compute Node is busy or - * unavailable at the time the Task is scheduled, then the Task will be scheduled - * elsewhere. - */ - affinityId?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). 
- */ - vmSize?: string; - /** - * The total number of Job Tasks completed on the Compute Node. This includes Job - * Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - * Tasks. - */ - totalTasksRun?: number; - /** - * The total number of currently running Job Tasks on the Compute Node. This - * includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job - * Release or Start Tasks. - */ - runningTasksCount?: number; - /** - * The total number of scheduling slots used by currently running Job Tasks on the - * Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job - * Preparation, Job Release or Start Tasks. - */ - runningTaskSlotsCount?: number; - /** - * The total number of Job Tasks which completed successfully (with exitCode 0) on - * the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job - * Preparation, Job Release or Start Tasks. - */ - totalTasksSucceeded?: number; - /** - * This property is present only if at least one Task has run on this Compute Node - * since it was assigned to the Pool. - */ - recentTasks?: Array; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ - startTask?: StartTaskOutput; - /** Information about a StartTask running on a Compute Node. */ - startTaskInfo?: StartTaskInformationOutput; - /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. - */ - certificateReferences?: Array; - /** The list of errors that are currently being encountered by the Compute Node. */ - errors?: Array; - /** - * Whether this Compute Node is a dedicated Compute Node. If false, the Compute - * Node is a Spot/Low-priority Compute Node. - */ - isDedicated?: boolean; - /** The endpoint configuration for the Compute Node. */ - endpointConfiguration?: ComputeNodeEndpointConfigurationOutput; - /** - * The Batch Compute Node agent is a program that runs on each Compute Node in the - * Pool and provides Batch capability on the Compute Node. - */ - nodeAgentInfo?: NodeAgentInformationOutput; - /** Info about the current state of the virtual machine. 
*/ - virtualMachineInfo?: VirtualMachineInfoOutput; -} - -/** Information about a Task running on a Compute Node. */ -export interface TaskInformationOutput { - /** The URL of the Task. */ - taskUrl?: string; - /** The ID of the Job to which the Task belongs. */ - jobId?: string; - /** The ID of the Task. */ - taskId?: string; - /** The ID of the subtask if the Task is a multi-instance Task. */ - subtaskId?: number; - /** - * The state of the Task. - * - * Possible values: active, preparing, running, completed - */ - taskState: string; - /** Information about the execution of a Task. */ - executionInfo?: TaskExecutionInformationOutput; -} - -/** Information about a StartTask running on a Compute Node. */ -export interface StartTaskInformationOutput { - /** - * The state of the StartTask on the Compute Node. - * - * Possible values: running, completed - */ - state: string; - /** - * This value is reset every time the Task is restarted or retried (that is, this - * is the most recent time at which the StartTask started running). - */ - startTime: string; - /** - * This is the end time of the most recent run of the StartTask, if that run has - * completed (even if that run failed and a retry is pending). This element is not - * present if the StartTask is currently running. - */ - endTime?: string; - /** - * This property is set only if the StartTask is in the completed state. In - * general, the exit code for a process reflects the specific convention - * implemented by the application developer for that process. If you use the exit - * code value to make decisions in your code, be sure that you know the exit code - * convention used by the application process. However, if the Batch service - * terminates the StartTask (due to timeout, or user termination via the API) you - * may see an operating system-defined exit code. - */ - exitCode?: number; - /** This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ - failureInfo?: TaskFailureInformationOutput; - /** - * Task application failures (non-zero exit code) are retried, pre-processing - * errors (the Task could not be run) and file upload errors are not retried. The - * Batch service will retry the Task up to the limit specified by the constraints. - */ - retryCount: number; - /** - * This element is present only if the Task was retried (i.e. retryCount is - * nonzero). If present, this is typically the same as startTime, but may be - * different if the Task has been restarted for reasons other than retry; for - * example, if the Compute Node was rebooted during a retry, then the startTime is - * updated but the lastRetryTime is not. - */ - lastRetryTime?: string; - /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. - * - * Possible values: success, failure - */ - result?: string; -} - -/** An error encountered by a Compute Node. */ -export interface ComputeNodeErrorOutput { - /** - * An identifier for the Compute Node error. Codes are invariant and are intended - * to be consumed programmatically. - */ - code?: string; - /** - * A message describing the Compute Node error, intended to be suitable for - * display in a user interface. - */ - message?: string; - /** The list of additional error details related to the Compute Node error. 
*/ - errorDetails?: Array<NameValuePairOutput>; -} - -/** The endpoint configuration for the Compute Node. */ -export interface ComputeNodeEndpointConfigurationOutput { - /** The list of inbound endpoints that are accessible on the Compute Node. */ - inboundEndpoints: Array<InboundEndpointOutput>; -} - -/** An inbound endpoint on a Compute Node. */ -export interface InboundEndpointOutput { - /** The name of the endpoint. */ - name: string; - /** - * The protocol of the endpoint. - * - * Possible values: tcp, udp - */ - protocol: string; - /** The public IP address of the Compute Node. */ - publicIPAddress: string; - /** The public fully qualified domain name for the Compute Node. */ - publicFQDN: string; - /** The public port number of the endpoint. */ - frontendPort: number; - /** The backend port number of the endpoint. */ - backendPort: number; -} - -/** - * The Batch Compute Node agent is a program that runs on each Compute Node in the - * Pool and provides Batch capability on the Compute Node. - */ -export interface NodeAgentInformationOutput { - /** - * This version number can be checked against the Compute Node agent release notes - * located at - * https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - */ - version: string; - /** - * This is the most recent time that the Compute Node agent was updated to a new - * version. - */ - lastUpdateTime: string; -} - -/** Info about the current state of the virtual machine. */ -export interface VirtualMachineInfoOutput { - /** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ - imageReference?: ImageReferenceOutput; -} - -/** The remote login settings for a Compute Node. */ -export interface ComputeNodeGetRemoteLoginSettingsResultOutput { - /** The IP address used for remote login to the Compute Node. */ - readonly remoteLoginIPAddress: string; - /** The port used for remote login to the Compute Node. */ - remoteLoginPort: number; -} - -/** The result of uploading Batch service log files from a specific Compute Node. */ -export interface UploadBatchServiceLogsResultOutput { - /** - * The virtual directory name is part of the blob name for each log file uploaded, - * and it is built based on poolId, nodeId and a unique identifier. - */ - readonly virtualDirectoryName: string; - /** The number of log files which will be uploaded. */ - numberOfFilesUploaded: number; -} - -/** The result of listing the Compute Nodes in a Pool. */ -export interface ComputeNodeListResultOutput { - /** The list of Compute Nodes. */ - value?: Array<ComputeNodeOutput>; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** The configuration for virtual machine extension instance view. */ -export interface NodeVMExtensionOutput { - /** The provisioning state of the virtual machine extension. */ - provisioningState?: string; - /** The configuration for virtual machine extensions. */ - vmExtension?: VMExtensionOutput; - /** The vm extension instance view. */ - instanceView?: VMExtensionInstanceViewOutput; -} - -/** The vm extension instance view. */ -export interface VMExtensionInstanceViewOutput { - /** The name of the vm extension instance view. */ - name?: string; - /** The resource status information. */ - statuses?: Array<InstanceViewStatusOutput>; - /** The resource status information. */ - subStatuses?: Array<InstanceViewStatusOutput>; -} - -/** The instance view status.
*/ -export interface InstanceViewStatusOutput { - /** The status code. */ - code?: string; - /** The localized label for the status. */ - displayStatus?: string; - /** - * Level code. - * - * Possible values: Error, Info, Warning - */ - level?: string; - /** The detailed status message. */ - message?: string; - /** The time of the status. */ - time?: string; -} - -/** The result of listing the Compute Node extensions in a Node. */ -export interface NodeVMExtensionListOutput { - /** The list of Compute Node extensions. */ - value?: Array<NodeVMExtensionOutput>; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** Paged collection of PoolUsageMetrics items */ -export type PoolUsageMetricsListOutput = Paged<PoolUsageMetricsOutput>; diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/paginateHelper.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/paginateHelper.ts deleted file mode 100644 index 1c9af35b1e..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/paginateHelper.ts +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { - getPagedAsyncIterator, - PagedAsyncIterableIterator, - PagedResult, -} from "@azure/core-paging"; -import { - Client, - createRestError, - PathUncheckedResponse, -} from "@azure-rest/core-client"; - -/** - * Helper type to extract the type of an array - */ -export type GetArrayType<T> = T extends Array<infer TData> ? TData : never; - -/** - * The type of a custom function that defines how to get a page and a link to the next one if any. - */ -export type GetPage<TPage> = ( - pageLink: string, - maxPageSize?: number -) => Promise<{ - page: TPage; - nextPageLink?: string; -}>; - -/** - * Options for the paging helper - */ -export interface PagingOptions<TResponse> { - /** - * Custom function to extract pagination details for creating the PagedAsyncIterableIterator - */ - customGetPage?: GetPage<PaginateReturn<TResponse>[]>; -} - -/** - * Helper type to infer the Type of the paged elements from the response type - * This type is generated based on the swagger information for x-ms-pageable - * specifically on the itemName property which indicates the property of the response - * where the page items are found. The default value is `value`. - * This type will allow us to provide strongly typed Iterator based on the response we get as second parameter - */ -export type PaginateReturn<TResult> = TResult extends { - body: { value?: infer TPage }; -} - ? GetArrayType<TPage> - : Array<unknown>; - -/** - * Helper to paginate results from an initial response that follows the specification of Autorest `x-ms-pageable` extension - * @param client - Client to use for sending the next page requests - * @param initialResponse - Initial response containing the nextLink and current page of elements - * @param customGetPage - Optional - Function to define how to extract the page and next link to be used to paginate the results - * @returns - PagedAsyncIterableIterator to iterate the elements - */ -export function paginate<TResponse extends PathUncheckedResponse>( - client: Client, - initialResponse: TResponse, - options: PagingOptions<TResponse> = {} -): PagedAsyncIterableIterator<PaginateReturn<TResponse>> { - // Extract element type from initial response - type TElement = PaginateReturn<TResponse>; - let firstRun = true; - const itemName = "value"; - const nextLinkName = "nextLink"; - const { customGetPage } = options; - const pagedResult: PagedResult<TElement[]> = { - firstPageLink: "", - getPage: - typeof customGetPage === "function" - ? customGetPage - : async (pageLink: string) => { - const result = firstRun - ? initialResponse - : await client.pathUnchecked(pageLink).get(); - firstRun = false; - checkPagingRequest(result); - const nextLink = getNextLink(result.body, nextLinkName); - const values = getElements(result.body, itemName); - return { - page: values, - nextPageLink: nextLink, - }; - }, - }; - - return getPagedAsyncIterator(pagedResult); -} - -/** - * Gets the value of nextLink in the body - */ -function getNextLink(body: unknown, nextLinkName?: string): string | undefined { - if (!nextLinkName) { - return undefined; - } - - const nextLink = (body as Record<string, unknown>)[nextLinkName]; - - if (typeof nextLink !== "string" && typeof nextLink !== "undefined") { - throw new Error( - `Body Property ${nextLinkName} should be a string or undefined` - ); - } - - return nextLink; -} - -/** - * Gets the elements of the current request in the body. - */ -function getElements<T = unknown>(body: unknown, itemName: string): T[] { - const value = (body as Record<string, unknown>)[itemName] as T[]; - - // value has to be an array according to the x-ms-pageable extension. - // The fact that this must be an array is used above to calculate the - // type of elements in the page in PaginateReturn - if (!Array.isArray(value)) { - throw new Error( - `Couldn't paginate response\n Body doesn't contain an array property with name: ${itemName}` - ); - } - - return value ?? []; -} - -/** - * Checks if a request failed - */ -function checkPagingRequest(response: PathUncheckedResponse): void { - const Http2xxStatusCodes = [ - "200", - "201", - "202", - "203", - "204", - "205", - "206", - "207", - "208", - "226", - ]; - if (!Http2xxStatusCodes.includes(response.status)) { - throw createRestError( - `Pagination failed with unexpected statusCode ${response.status}`, - response - ); - } -}
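For reviewers weighing what this deleted helper provided: generated packages consumed it roughly as in the sketch below, where the client value and the "/applications" route are illustrative assumptions rather than part of this diff.

// Sketch only: walking every page of a pageable operation via the deleted helper.
// `client` is a Batch RLC client created elsewhere; the route is illustrative.
import { paginate } from "./paginateHelper";

const initialResponse = await client.path("/applications").get();
// PaginateReturn infers the element type from `body.value`, so each yielded
// item is strongly typed instead of unknown.
for await (const application of paginate(client, initialResponse)) {
  console.log(application);
}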
diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/parameters.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/parameters.ts deleted file mode 100644 index e0a4c1fd3a..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/parameters.ts +++ /dev/null @@ -1,3879 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { RawHttpHeadersInput } from "@azure/core-rest-pipeline"; -import { RequestParameters } from "@azure-rest/core-client"; -import { - BatchPool, - BatchPoolEnableAutoScaleParameters, - BatchPoolEvaluateAutoScaleParameters, - BatchPoolResizeParameters, - NodeRemoveParameters, - BatchJob, - BatchJobDisableParameters, - BatchJobTerminateParameters, - Certificate, - BatchJobSchedule, - BatchTask, - BatchTaskCollection, - ComputeNodeUser, - NodeUpdateUserParameters, - NodeRebootParameters, - NodeReimageParameters, - NodeDisableSchedulingParameters, - UploadBatchServiceLogsConfiguration, -} from "./models"; - -export interface ApplicationOperationsListHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface ApplicationOperationsListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned.
- */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ApplicationOperationsListQueryParam { - queryParameters?: ApplicationOperationsListQueryParamProperties; -} - -export interface ApplicationOperationsListHeaderParam { - headers?: RawHttpHeadersInput & ApplicationOperationsListHeaders; -} - -export type ApplicationOperationsListParameters = - ApplicationOperationsListQueryParam & - ApplicationOperationsListHeaderParam & - RequestParameters;
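Every operation in this file shapes its options the same way as the alias above: typed query parameters, typed headers, and the base RequestParameters intersected into a single options bag. A sketch of what that accepts, with an assumed client and route:

// Sketch only: the options bag is just the intersection type.
const listOptions: ApplicationOperationsListParameters = {
  queryParameters: { maxresults: 100, timeOut: 30 },
  headers: { "client-request-id": "9C4D50EE-2D56-4CD3-8152-34347DC9F2B0" },
};
const listResponse = await client.path("/applications").get(listOptions);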
- */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface PoolListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. - */ - $filter?: string; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. */ - $expand?: string; -} - -export interface PoolListQueryParam { - queryParameters?: PoolListQueryParamProperties; -} - -export interface PoolListHeaderParam { - headers?: RawHttpHeadersInput & PoolListHeaders; -} - -export type PoolListParameters = PoolListQueryParam & - PoolListHeaderParam & - RequestParameters; - -export interface PoolDeleteHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolDeleteQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolDeleteQueryParam { - queryParameters?: PoolDeleteQueryParamProperties; -} - -export interface PoolDeleteHeaderParam { - headers?: RawHttpHeadersInput & PoolDeleteHeaders; -} - -export type PoolDeleteParameters = PoolDeleteQueryParam & - PoolDeleteHeaderParam & - RequestParameters; - -export interface PoolExistsHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. 
*/ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolExistsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolExistsQueryParam { - queryParameters?: PoolExistsQueryParamProperties; -} - -export interface PoolExistsHeaderParam { - headers?: RawHttpHeadersInput & PoolExistsHeaders; -} - -export type PoolExistsParameters = PoolExistsQueryParam & - PoolExistsHeaderParam & - RequestParameters; - -export interface PoolGetHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolGetQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; - /** An OData $select clause. */ - $select: string; - /** An OData $expand clause. */ - $expand: string; -} - -export interface PoolGetQueryParam { - queryParameters: PoolGetQueryParamProperties; -} - -export interface PoolGetHeaderParam { - headers?: RawHttpHeadersInput & PoolGetHeaders; -} - -export type PoolGetParameters = PoolGetQueryParam & - PoolGetHeaderParam & - RequestParameters; - -export interface PoolPatchHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolPatchBodyParam { - /** The parameters for the request. */ - body: BatchPool; -} - -export interface PoolPatchQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolPatchQueryParam { - queryParameters?: PoolPatchQueryParamProperties; -} - -export interface PoolPatchHeaderParam { - headers?: RawHttpHeadersInput & PoolPatchHeaders; -} - -export type PoolPatchParameters = PoolPatchQueryParam & - PoolPatchHeaderParam & - PoolPatchBodyParam & - RequestParameters; - -export interface PoolDisableAutoScaleHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface PoolDisableAutoScaleQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface PoolDisableAutoScaleQueryParam { - queryParameters?: PoolDisableAutoScaleQueryParamProperties; -} - -export interface PoolDisableAutoScaleHeaderParam { - headers?: RawHttpHeadersInput & PoolDisableAutoScaleHeaders; -} - -export type PoolDisableAutoScaleParameters = PoolDisableAutoScaleQueryParam & - PoolDisableAutoScaleHeaderParam & - RequestParameters; - -export interface PoolEnableAutoScaleHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolEnableAutoScaleBodyParam { - /** The parameters for the request. */ - body: BatchPoolEnableAutoScaleParameters; -} - -export interface PoolEnableAutoScaleQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolEnableAutoScaleQueryParam { - queryParameters?: PoolEnableAutoScaleQueryParamProperties; -} - -export interface PoolEnableAutoScaleHeaderParam { - headers?: RawHttpHeadersInput & PoolEnableAutoScaleHeaders; -} - -export type PoolEnableAutoScaleParameters = PoolEnableAutoScaleQueryParam & - PoolEnableAutoScaleHeaderParam & - PoolEnableAutoScaleBodyParam & - RequestParameters; - -export interface PoolEvaluateAutoScaleHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface PoolEvaluateAutoScaleBodyParam { - /** The parameters for the request. 
*/ - body: BatchPoolEvaluateAutoScaleParameters; -} - -export interface PoolEvaluateAutoScaleQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolEvaluateAutoScaleQueryParam { - queryParameters?: PoolEvaluateAutoScaleQueryParamProperties; -} - -export interface PoolEvaluateAutoScaleHeaderParam { - headers?: RawHttpHeadersInput & PoolEvaluateAutoScaleHeaders; -} - -export type PoolEvaluateAutoScaleParameters = PoolEvaluateAutoScaleQueryParam & - PoolEvaluateAutoScaleHeaderParam & - PoolEvaluateAutoScaleBodyParam & - RequestParameters; - -export interface PoolResizeHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolResizeBodyParam { - /** The parameters for the request. */ - body: BatchPoolResizeParameters; -} - -export interface PoolResizeQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolResizeQueryParam { - queryParameters?: PoolResizeQueryParamProperties; -} - -export interface PoolResizeHeaderParam { - headers?: RawHttpHeadersInput & PoolResizeHeaders; -} - -export type PoolResizeParameters = PoolResizeQueryParam & - PoolResizeHeaderParam & - PoolResizeBodyParam & - RequestParameters; - -export interface PoolStopResizeHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. 
- */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolStopResizeQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolStopResizeQueryParam { - queryParameters?: PoolStopResizeQueryParamProperties; -} - -export interface PoolStopResizeHeaderParam { - headers?: RawHttpHeadersInput & PoolStopResizeHeaders; -} - -export type PoolStopResizeParameters = PoolStopResizeQueryParam & - PoolStopResizeHeaderParam & - RequestParameters; - -export interface PoolUpdatePropertiesHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface PoolUpdatePropertiesBodyParam { - /** The parameters for the request. */ - body: BatchPool; -} - -export interface PoolUpdatePropertiesQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolUpdatePropertiesQueryParam { - queryParameters?: PoolUpdatePropertiesQueryParamProperties; -} - -export interface PoolUpdatePropertiesHeaderParam { - headers?: RawHttpHeadersInput & PoolUpdatePropertiesHeaders; -} - -export type PoolUpdatePropertiesParameters = PoolUpdatePropertiesQueryParam & - PoolUpdatePropertiesHeaderParam & - PoolUpdatePropertiesBodyParam & - RequestParameters; - -export interface PoolRemoveNodesHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. 
- */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface PoolRemoveNodesBodyParam { - /** The parameters for the request. */ - body: NodeRemoveParameters; -} - -export interface PoolRemoveNodesQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface PoolRemoveNodesQueryParam { - queryParameters?: PoolRemoveNodesQueryParamProperties; -} - -export interface PoolRemoveNodesHeaderParam { - headers?: RawHttpHeadersInput & PoolRemoveNodesHeaders; -} - -export type PoolRemoveNodesParameters = PoolRemoveNodesQueryParam & - PoolRemoveNodesHeaderParam & - PoolRemoveNodesBodyParam & - RequestParameters; - -export interface AccountListSupportedImagesHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface AccountListSupportedImagesQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. - */ - $filter?: string; -} - -export interface AccountListSupportedImagesQueryParam { - queryParameters?: AccountListSupportedImagesQueryParamProperties; -} - -export interface AccountListSupportedImagesHeaderParam { - headers?: RawHttpHeadersInput & AccountListSupportedImagesHeaders; -} - -export type AccountListSupportedImagesParameters = - AccountListSupportedImagesQueryParam & - AccountListSupportedImagesHeaderParam & - RequestParameters; - -export interface AccountListPoolNodeCountsHeaders { - /** - * The time the request was issued. 
Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface AccountListPoolNodeCountsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. - */ - $filter?: string; -} - -export interface AccountListPoolNodeCountsQueryParam { - queryParameters?: AccountListPoolNodeCountsQueryParamProperties; -} - -export interface AccountListPoolNodeCountsHeaderParam { - headers?: RawHttpHeadersInput & AccountListPoolNodeCountsHeaders; -} - -export type AccountListPoolNodeCountsParameters = - AccountListPoolNodeCountsQueryParam & - AccountListPoolNodeCountsHeaderParam & - RequestParameters; - -export interface JobGetAllLifetimeStatisticsHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface JobGetAllLifetimeStatisticsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobGetAllLifetimeStatisticsQueryParam { - queryParameters?: JobGetAllLifetimeStatisticsQueryParamProperties; -} - -export interface JobGetAllLifetimeStatisticsHeaderParam { - headers?: RawHttpHeadersInput & JobGetAllLifetimeStatisticsHeaders; -} - -export type JobGetAllLifetimeStatisticsParameters = - JobGetAllLifetimeStatisticsQueryParam & - JobGetAllLifetimeStatisticsHeaderParam & - RequestParameters; - -export interface JobDeleteHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. 
- */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobDeleteQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobDeleteQueryParam { - queryParameters?: JobDeleteQueryParamProperties; -} - -export interface JobDeleteHeaderParam { - headers?: RawHttpHeadersInput & JobDeleteHeaders; -} - -export type JobDeleteParameters = JobDeleteQueryParam & - JobDeleteHeaderParam & - RequestParameters; - -export interface JobGetHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobGetQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. 
*/ - $expand?: string; -} - -export interface JobGetQueryParam { - queryParameters?: JobGetQueryParamProperties; -} - -export interface JobGetHeaderParam { - headers?: RawHttpHeadersInput & JobGetHeaders; -} - -export type JobGetParameters = JobGetQueryParam & - JobGetHeaderParam & - RequestParameters; - -export interface JobPatchHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobPatchBodyParam { - /** The parameters for the request. */ - body: BatchJob; -} - -export interface JobPatchQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobPatchQueryParam { - queryParameters?: JobPatchQueryParamProperties; -} - -export interface JobPatchHeaderParam { - headers?: RawHttpHeadersInput & JobPatchHeaders; -} - -export type JobPatchParameters = JobPatchQueryParam & - JobPatchHeaderParam & - JobPatchBodyParam & - RequestParameters; - -export interface JobUpdateHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. 
- * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobUpdateBodyParam { - /** The parameters for the request. */ - body: BatchJob; -} - -export interface JobUpdateQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobUpdateQueryParam { - queryParameters?: JobUpdateQueryParamProperties; -} - -export interface JobUpdateHeaderParam { - headers?: RawHttpHeadersInput & JobUpdateHeaders; -} - -export type JobUpdateParameters = JobUpdateQueryParam & - JobUpdateHeaderParam & - JobUpdateBodyParam & - RequestParameters; - -export interface JobDisableHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobDisableBodyParam { - /** The parameters for the request. */ - body: BatchJobDisableParameters; -} - -export interface JobDisableQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface JobDisableQueryParam { - queryParameters?: JobDisableQueryParamProperties; -} - -export interface JobDisableHeaderParam { - headers?: RawHttpHeadersInput & JobDisableHeaders; -} - -export type JobDisableParameters = JobDisableQueryParam & - JobDisableHeaderParam & - JobDisableBodyParam & - RequestParameters; - -export interface JobEnableHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobEnableQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobEnableQueryParam { - queryParameters?: JobEnableQueryParamProperties; -} - -export interface JobEnableHeaderParam { - headers?: RawHttpHeadersInput & JobEnableHeaders; -} - -export type JobEnableParameters = JobEnableQueryParam & - JobEnableHeaderParam & - RequestParameters; - -export interface JobTerminateHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. 
- */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobTerminateBodyParam { - /** The parameters for the request. */ - body: BatchJobTerminateParameters; -} - -export interface JobTerminateQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobTerminateQueryParam { - queryParameters?: JobTerminateQueryParamProperties; -} - -export interface JobTerminateHeaderParam { - headers?: RawHttpHeadersInput & JobTerminateHeaders; -} - -export type JobTerminateParameters = JobTerminateQueryParam & - JobTerminateHeaderParam & - JobTerminateBodyParam & - RequestParameters; - -export interface JobAddHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface JobAddBodyParam { - /** The Job to be added. */ - body: BatchJob; -} - -export interface JobAddQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobAddQueryParam { - queryParameters?: JobAddQueryParamProperties; -} - -export interface JobAddHeaderParam { - headers?: RawHttpHeadersInput & JobAddHeaders; -} - -export type JobAddParameters = JobAddQueryParam & - JobAddHeaderParam & - JobAddBodyParam & - RequestParameters; - -export interface JobListHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface JobListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. - */ - $filter: string; - /** An OData $select clause. */ - $select: string; - /** An OData $expand clause. 
*/ - $expand: string; -} - -export interface JobListQueryParam { - queryParameters: JobListQueryParamProperties; -} - -export interface JobListHeaderParam { - headers?: RawHttpHeadersInput & JobListHeaders; -} - -export type JobListParameters = JobListQueryParam & - JobListHeaderParam & - RequestParameters; - -export interface JobListFromJobScheduleHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface JobListFromJobScheduleQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. - */ - $filter: string; - /** An OData $select clause. */ - $select: string; - /** An OData $expand clause. */ - $expand: string; -} - -export interface JobListFromJobScheduleQueryParam { - queryParameters: JobListFromJobScheduleQueryParamProperties; -} - -export interface JobListFromJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobListFromJobScheduleHeaders; -} - -export type JobListFromJobScheduleParameters = - JobListFromJobScheduleQueryParam & - JobListFromJobScheduleHeaderParam & - RequestParameters; - -export interface JobListPreparationAndReleaseTaskStatusHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface JobListPreparationAndReleaseTaskStatusQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. - */ - $filter: string; - /** An OData $select clause. 
*/ - $select: string; -} - -export interface JobListPreparationAndReleaseTaskStatusQueryParam { - queryParameters: JobListPreparationAndReleaseTaskStatusQueryParamProperties; -} - -export interface JobListPreparationAndReleaseTaskStatusHeaderParam { - headers?: RawHttpHeadersInput & JobListPreparationAndReleaseTaskStatusHeaders; -} - -export type JobListPreparationAndReleaseTaskStatusParameters = - JobListPreparationAndReleaseTaskStatusQueryParam & - JobListPreparationAndReleaseTaskStatusHeaderParam & - RequestParameters; - -export interface JobGetTaskCountsHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface JobGetTaskCountsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobGetTaskCountsQueryParam { - queryParameters?: JobGetTaskCountsQueryParamProperties; -} - -export interface JobGetTaskCountsHeaderParam { - headers?: RawHttpHeadersInput & JobGetTaskCountsHeaders; -} - -export type JobGetTaskCountsParameters = JobGetTaskCountsQueryParam & - JobGetTaskCountsHeaderParam & - RequestParameters; - -export interface CertificateOperationsAddHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface CertificateOperationsAddBodyParam { - /** The Certificate to be added. */ - body: Certificate; -} - -export interface CertificateOperationsAddQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface CertificateOperationsAddQueryParam { - queryParameters?: CertificateOperationsAddQueryParamProperties; -} - -export interface CertificateOperationsAddHeaderParam { - headers?: RawHttpHeadersInput & CertificateOperationsAddHeaders; -} - -export type CertificateOperationsAddParameters = - CertificateOperationsAddQueryParam & - CertificateOperationsAddHeaderParam & - CertificateOperationsAddBodyParam & - RequestParameters; - -export interface CertificateOperationsListHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. 
*/ - "return-client-request-id"?: boolean; -} - -export interface CertificateOperationsListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. - */ - $filter: string; - /** An OData $select clause. */ - $select: string; -} - -export interface CertificateOperationsListQueryParam { - queryParameters: CertificateOperationsListQueryParamProperties; -} - -export interface CertificateOperationsListHeaderParam { - headers?: RawHttpHeadersInput & CertificateOperationsListHeaders; -} - -export type CertificateOperationsListParameters = - CertificateOperationsListQueryParam & - CertificateOperationsListHeaderParam & - RequestParameters; - -export interface CertificateOperationsCancelDeletionHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface CertificateOperationsCancelDeletionQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface CertificateOperationsCancelDeletionQueryParam { - queryParameters?: CertificateOperationsCancelDeletionQueryParamProperties; -} - -export interface CertificateOperationsCancelDeletionHeaderParam { - headers?: RawHttpHeadersInput & CertificateOperationsCancelDeletionHeaders; -} - -export type CertificateOperationsCancelDeletionParameters = - CertificateOperationsCancelDeletionQueryParam & - CertificateOperationsCancelDeletionHeaderParam & - RequestParameters; - -export interface CertificateOperationsDeleteHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface CertificateOperationsDeleteQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface CertificateOperationsDeleteQueryParam { - queryParameters?: CertificateOperationsDeleteQueryParamProperties; -} - -export interface CertificateOperationsDeleteHeaderParam { - headers?: RawHttpHeadersInput & CertificateOperationsDeleteHeaders; -} - -export type CertificateOperationsDeleteParameters = - CertificateOperationsDeleteQueryParam & - CertificateOperationsDeleteHeaderParam & - RequestParameters; - -export interface CertificateOperationsGetHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface CertificateOperationsGetQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. */ - $select: string; -} - -export interface CertificateOperationsGetQueryParam { - queryParameters: CertificateOperationsGetQueryParamProperties; -} - -export interface CertificateOperationsGetHeaderParam { - headers?: RawHttpHeadersInput & CertificateOperationsGetHeaders; -} - -export type CertificateOperationsGetParameters = - CertificateOperationsGetQueryParam & - CertificateOperationsGetHeaderParam & - RequestParameters; - -export interface FileDeleteFromTaskHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface FileDeleteFromTaskQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * Whether to delete children of a directory. If the filePath parameter represents - * a directory instead of a file, you can set recursive to true to delete the - * directory and all of the files and subdirectories in it. If recursive is false - * then the directory must be empty or deletion will fail. - */ - recursive: boolean; -} - -export interface FileDeleteFromTaskQueryParam { - queryParameters: FileDeleteFromTaskQueryParamProperties; -} - -export interface FileDeleteFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileDeleteFromTaskHeaders; -} - -export type FileDeleteFromTaskParameters = FileDeleteFromTaskQueryParam & - FileDeleteFromTaskHeaderParam & - RequestParameters; - -export interface FileGetFromTaskHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. 
*/ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; - /** - * The byte range to be retrieved. The default is to retrieve the entire file. The - * format is bytes=startRange-endRange. - */ - "ocp-range"?: string; -} - -export interface FileGetFromTaskQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface FileGetFromTaskQueryParam { - queryParameters?: FileGetFromTaskQueryParamProperties; -} - -export interface FileGetFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileGetFromTaskHeaders; -} - -export type FileGetFromTaskParameters = FileGetFromTaskQueryParam & - FileGetFromTaskHeaderParam & - RequestParameters; - -export interface FileGetPropertiesFromTaskHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface FileGetPropertiesFromTaskQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface FileGetPropertiesFromTaskQueryParam { - queryParameters?: FileGetPropertiesFromTaskQueryParamProperties; -} - -export interface FileGetPropertiesFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileGetPropertiesFromTaskHeaders; -} - -export type FileGetPropertiesFromTaskParameters = - FileGetPropertiesFromTaskQueryParam & - FileGetPropertiesFromTaskHeaderParam & - RequestParameters; - -export interface FileDeleteFromComputeNodeHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. 
*/ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface FileDeleteFromComputeNodeQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * Whether to delete children of a directory. If the filePath parameter represents - * a directory instead of a file, you can set recursive to true to delete the - * directory and all of the files and subdirectories in it. If recursive is false - * then the directory must be empty or deletion will fail. - */ - recursive?: boolean; -} - -export interface FileDeleteFromComputeNodeQueryParam { - queryParameters?: FileDeleteFromComputeNodeQueryParamProperties; -} - -export interface FileDeleteFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileDeleteFromComputeNodeHeaders; -} - -export type FileDeleteFromComputeNodeParameters = - FileDeleteFromComputeNodeQueryParam & - FileDeleteFromComputeNodeHeaderParam & - RequestParameters; - -export interface FileGetFromComputeNodeHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; - /** - * The byte range to be retrieved. The default is to retrieve the entire file. The - * format is bytes=startRange-endRange. - */ - "ocp-range"?: string; -} - -export interface FileGetFromComputeNodeQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface FileGetFromComputeNodeQueryParam { - queryParameters?: FileGetFromComputeNodeQueryParamProperties; -} - -export interface FileGetFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileGetFromComputeNodeHeaders; -} - -export type FileGetFromComputeNodeParameters = - FileGetFromComputeNodeQueryParam & - FileGetFromComputeNodeHeaderParam & - RequestParameters; - -export interface FileGetPropertiesFromComputeNodeHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. 
Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface FileGetPropertiesFromComputeNodeQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface FileGetPropertiesFromComputeNodeQueryParam { - queryParameters?: FileGetPropertiesFromComputeNodeQueryParamProperties; -} - -export interface FileGetPropertiesFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileGetPropertiesFromComputeNodeHeaders; -} - -export type FileGetPropertiesFromComputeNodeParameters = - FileGetPropertiesFromComputeNodeQueryParam & - FileGetPropertiesFromComputeNodeHeaderParam & - RequestParameters; - -export interface FileListFromTaskHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface FileListFromTaskQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. - */ - $filter: string; - /** - * Whether to list children of the Task directory. This parameter can be used in - * combination with the filter parameter to list specific type of files. - */ - recursive: boolean; -} - -export interface FileListFromTaskQueryParam { - queryParameters: FileListFromTaskQueryParamProperties; -} - -export interface FileListFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileListFromTaskHeaders; -} - -export type FileListFromTaskParameters = FileListFromTaskQueryParam & - FileListFromTaskHeaderParam & - RequestParameters; - -export interface FileListFromComputeNodeHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
- */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface FileListFromComputeNodeQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. - */ - $filter: string; - /** Whether to list children of a directory. */ - recursive: boolean; -} - -export interface FileListFromComputeNodeQueryParam { - queryParameters: FileListFromComputeNodeQueryParamProperties; -} - -export interface FileListFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileListFromComputeNodeHeaders; -} - -export type FileListFromComputeNodeParameters = - FileListFromComputeNodeQueryParam & - FileListFromComputeNodeHeaderParam & - RequestParameters; - -export interface JobScheduleExistsHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobScheduleExistsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobScheduleExistsQueryParam { - queryParameters?: JobScheduleExistsQueryParamProperties; -} - -export interface JobScheduleExistsHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleExistsHeaders; -} - -export type JobScheduleExistsParameters = JobScheduleExistsQueryParam & - JobScheduleExistsHeaderParam & - RequestParameters; - -export interface JobScheduleDeleteHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobScheduleDeleteQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobScheduleDeleteQueryParam { - queryParameters?: JobScheduleDeleteQueryParamProperties; -} - -export interface JobScheduleDeleteHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleDeleteHeaders; -} - -export type JobScheduleDeleteParameters = JobScheduleDeleteQueryParam & - JobScheduleDeleteHeaderParam & - RequestParameters; - -export interface JobScheduleGetHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. 
- */ - "if-unmodified-since"?: string; -} - -export interface JobScheduleGetQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. */ - $expand?: string; -} - -export interface JobScheduleGetQueryParam { - queryParameters?: JobScheduleGetQueryParamProperties; -} - -export interface JobScheduleGetHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleGetHeaders; -} - -export type JobScheduleGetParameters = JobScheduleGetQueryParam & - JobScheduleGetHeaderParam & - RequestParameters; - -export interface JobSchedulePatchHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobSchedulePatchBodyParam { - /** The parameters for the request. */ - body: BatchJobSchedule; -} - -export interface JobSchedulePatchQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobSchedulePatchQueryParam { - queryParameters?: JobSchedulePatchQueryParamProperties; -} - -export interface JobSchedulePatchHeaderParam { - headers?: RawHttpHeadersInput & JobSchedulePatchHeaders; -} - -export type JobSchedulePatchParameters = JobSchedulePatchQueryParam & - JobSchedulePatchHeaderParam & - JobSchedulePatchBodyParam & - RequestParameters; - -export interface JobScheduleUpdateHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. 
- */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobScheduleUpdateBodyParam { - /** The parameters for the request. */ - body: BatchJobSchedule; -} - -export interface JobScheduleUpdateQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobScheduleUpdateQueryParam { - queryParameters?: JobScheduleUpdateQueryParamProperties; -} - -export interface JobScheduleUpdateHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleUpdateHeaders; -} - -export type JobScheduleUpdateParameters = JobScheduleUpdateQueryParam & - JobScheduleUpdateHeaderParam & - JobScheduleUpdateBodyParam & - RequestParameters; - -export interface JobScheduleDisableHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobScheduleDisableQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface JobScheduleDisableQueryParam { - queryParameters?: JobScheduleDisableQueryParamProperties; -} - -export interface JobScheduleDisableHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleDisableHeaders; -} - -export type JobScheduleDisableParameters = JobScheduleDisableQueryParam & - JobScheduleDisableHeaderParam & - RequestParameters; - -export interface JobScheduleEnableHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobScheduleEnableQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobScheduleEnableQueryParam { - queryParameters?: JobScheduleEnableQueryParamProperties; -} - -export interface JobScheduleEnableHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleEnableHeaders; -} - -export type JobScheduleEnableParameters = JobScheduleEnableQueryParam & - JobScheduleEnableHeaderParam & - RequestParameters; - -export interface JobScheduleTerminateHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. 
- * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface JobScheduleTerminateQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobScheduleTerminateQueryParam { - queryParameters?: JobScheduleTerminateQueryParamProperties; -} - -export interface JobScheduleTerminateHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleTerminateHeaders; -} - -export type JobScheduleTerminateParameters = JobScheduleTerminateQueryParam & - JobScheduleTerminateHeaderParam & - RequestParameters; - -export interface JobScheduleAddHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface JobScheduleAddBodyParam { - /** The Job Schedule to be added. */ - body: BatchJobSchedule; -} - -export interface JobScheduleAddQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface JobScheduleAddQueryParam { - queryParameters?: JobScheduleAddQueryParamProperties; -} - -export interface JobScheduleAddHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleAddHeaders; -} - -export type JobScheduleAddParameters = JobScheduleAddQueryParam & - JobScheduleAddHeaderParam & - JobScheduleAddBodyParam & - RequestParameters; - -export interface JobScheduleListHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface JobScheduleListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. 
For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. - */ - $filter?: string; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. */ - $expand?: string; -} - -export interface JobScheduleListQueryParam { - queryParameters?: JobScheduleListQueryParamProperties; -} - -export interface JobScheduleListHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleListHeaders; -} - -export type JobScheduleListParameters = JobScheduleListQueryParam & - JobScheduleListHeaderParam & - RequestParameters; - -export interface TaskAddHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface TaskAddBodyParam { - /** The Task to be added. */ - body: BatchTask; -} - -export interface TaskAddQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface TaskAddQueryParam { - queryParameters?: TaskAddQueryParamProperties; -} - -export interface TaskAddHeaderParam { - headers?: RawHttpHeadersInput & TaskAddHeaders; -} - -export type TaskAddParameters = TaskAddQueryParam & - TaskAddHeaderParam & - TaskAddBodyParam & - RequestParameters; - -export interface TaskListHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface TaskListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. - */ - $filter?: string; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. */ - $expand?: string; -} - -export interface TaskListQueryParam { - queryParameters?: TaskListQueryParamProperties; -} - -export interface TaskListHeaderParam { - headers?: RawHttpHeadersInput & TaskListHeaders; -} - -export type TaskListParameters = TaskListQueryParam & - TaskListHeaderParam & - RequestParameters; - -export interface TaskAddCollectionHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
- */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface TaskAddCollectionBodyParam { - /** The Tasks to be added. */ - body: BatchTaskCollection; -} - -export interface TaskAddCollectionQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface TaskAddCollectionQueryParam { - queryParameters?: TaskAddCollectionQueryParamProperties; -} - -export interface TaskAddCollectionHeaderParam { - headers?: RawHttpHeadersInput & TaskAddCollectionHeaders; -} - -export type TaskAddCollectionParameters = TaskAddCollectionQueryParam & - TaskAddCollectionHeaderParam & - TaskAddCollectionBodyParam & - RequestParameters; - -export interface TaskDeleteHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface TaskDeleteQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface TaskDeleteQueryParam { - queryParameters?: TaskDeleteQueryParamProperties; -} - -export interface TaskDeleteHeaderParam { - headers?: RawHttpHeadersInput & TaskDeleteHeaders; -} - -export type TaskDeleteParameters = TaskDeleteQueryParam & - TaskDeleteHeaderParam & - RequestParameters; - -export interface TaskGetHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. 
*/ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface TaskGetQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. */ - $expand?: string; -} - -export interface TaskGetQueryParam { - queryParameters?: TaskGetQueryParamProperties; -} - -export interface TaskGetHeaderParam { - headers?: RawHttpHeadersInput & TaskGetHeaders; -} - -export type TaskGetParameters = TaskGetQueryParam & - TaskGetHeaderParam & - RequestParameters; - -export interface TaskUpdateHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface TaskUpdateBodyParam { - /** The parameters for the request. 
*/ - body: BatchTask; -} - -export interface TaskUpdateQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface TaskUpdateQueryParam { - queryParameters?: TaskUpdateQueryParamProperties; -} - -export interface TaskUpdateHeaderParam { - headers?: RawHttpHeadersInput & TaskUpdateHeaders; -} - -export type TaskUpdateParameters = TaskUpdateQueryParam & - TaskUpdateHeaderParam & - TaskUpdateBodyParam & - RequestParameters; - -export interface TaskListSubtasksHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface TaskListSubtasksQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. */ - $select?: string; -} - -export interface TaskListSubtasksQueryParam { - queryParameters?: TaskListSubtasksQueryParamProperties; -} - -export interface TaskListSubtasksHeaderParam { - headers?: RawHttpHeadersInput & TaskListSubtasksHeaders; -} - -export type TaskListSubtasksParameters = TaskListSubtasksQueryParam & - TaskListSubtasksHeaderParam & - RequestParameters; - -export interface TaskTerminateHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface TaskTerminateQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface TaskTerminateQueryParam { - queryParameters?: TaskTerminateQueryParamProperties; -} - -export interface TaskTerminateHeaderParam { - headers?: RawHttpHeadersInput & TaskTerminateHeaders; -} - -export type TaskTerminateParameters = TaskTerminateQueryParam & - TaskTerminateHeaderParam & - RequestParameters; - -export interface TaskReactivateHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; -} - -export interface TaskReactivateQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface TaskReactivateQueryParam { - queryParameters?: TaskReactivateQueryParamProperties; -} - -export interface TaskReactivateHeaderParam { - headers?: RawHttpHeadersInput & TaskReactivateHeaders; -} - -export type TaskReactivateParameters = TaskReactivateQueryParam & - TaskReactivateHeaderParam & - RequestParameters; - -export interface ComputeNodeOperationsAddUserHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsAddUserBodyParam { - /** The user Account to be created. */ - body: ComputeNodeUser; -} - -export interface ComputeNodeOperationsAddUserQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface ComputeNodeOperationsAddUserQueryParam { - queryParameters?: ComputeNodeOperationsAddUserQueryParamProperties; -} - -export interface ComputeNodeOperationsAddUserHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsAddUserHeaders; -} - -export type ComputeNodeOperationsAddUserParameters = - ComputeNodeOperationsAddUserQueryParam & - ComputeNodeOperationsAddUserHeaderParam & - ComputeNodeOperationsAddUserBodyParam & - RequestParameters; - -export interface ComputeNodeOperationsDeleteUserHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsDeleteUserQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ComputeNodeOperationsDeleteUserQueryParam { - queryParameters?: ComputeNodeOperationsDeleteUserQueryParamProperties; -} - -export interface ComputeNodeOperationsDeleteUserHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsDeleteUserHeaders; -} - -export type ComputeNodeOperationsDeleteUserParameters = - ComputeNodeOperationsDeleteUserQueryParam & - ComputeNodeOperationsDeleteUserHeaderParam & - RequestParameters; - -export interface ComputeNodeOperationsUpdateUserHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsUpdateUserBodyParam { - /** The parameters for the request. */ - body: NodeUpdateUserParameters; -} - -export interface ComputeNodeOperationsUpdateUserQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ComputeNodeOperationsUpdateUserQueryParam { - queryParameters?: ComputeNodeOperationsUpdateUserQueryParamProperties; -} - -export interface ComputeNodeOperationsUpdateUserHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsUpdateUserHeaders; -} - -export type ComputeNodeOperationsUpdateUserParameters = - ComputeNodeOperationsUpdateUserQueryParam & - ComputeNodeOperationsUpdateUserHeaderParam & - ComputeNodeOperationsUpdateUserBodyParam & - RequestParameters; - -export interface ComputeNodeOperationsGetHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. 
*/ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsGetQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. */ - $select?: string; -} - -export interface ComputeNodeOperationsGetQueryParam { - queryParameters?: ComputeNodeOperationsGetQueryParamProperties; -} - -export interface ComputeNodeOperationsGetHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsGetHeaders; -} - -export type ComputeNodeOperationsGetParameters = - ComputeNodeOperationsGetQueryParam & - ComputeNodeOperationsGetHeaderParam & - RequestParameters; - -export interface ComputeNodeOperationsRebootHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsRebootBodyParam { - /** The parameters for the request. */ - body: NodeRebootParameters; -} - -export interface ComputeNodeOperationsRebootQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ComputeNodeOperationsRebootQueryParam { - queryParameters?: ComputeNodeOperationsRebootQueryParamProperties; -} - -export interface ComputeNodeOperationsRebootHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsRebootHeaders; -} - -export type ComputeNodeOperationsRebootParameters = - ComputeNodeOperationsRebootQueryParam & - ComputeNodeOperationsRebootHeaderParam & - ComputeNodeOperationsRebootBodyParam & - RequestParameters; - -export interface ComputeNodeOperationsReimageHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsReimageBodyParam { - /** The parameters for the request. */ - body: NodeReimageParameters; -} - -export interface ComputeNodeOperationsReimageQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface ComputeNodeOperationsReimageQueryParam { - queryParameters?: ComputeNodeOperationsReimageQueryParamProperties; -} - -export interface ComputeNodeOperationsReimageHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsReimageHeaders; -} - -export type ComputeNodeOperationsReimageParameters = - ComputeNodeOperationsReimageQueryParam & - ComputeNodeOperationsReimageHeaderParam & - ComputeNodeOperationsReimageBodyParam & - RequestParameters; - -export interface ComputeNodeOperationsDisableSchedulingHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsDisableSchedulingBodyParam { - /** The parameters for the request. */ - body: NodeDisableSchedulingParameters; -} - -export interface ComputeNodeOperationsDisableSchedulingQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ComputeNodeOperationsDisableSchedulingQueryParam { - queryParameters?: ComputeNodeOperationsDisableSchedulingQueryParamProperties; -} - -export interface ComputeNodeOperationsDisableSchedulingHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsDisableSchedulingHeaders; -} - -export type ComputeNodeOperationsDisableSchedulingParameters = - ComputeNodeOperationsDisableSchedulingQueryParam & - ComputeNodeOperationsDisableSchedulingHeaderParam & - ComputeNodeOperationsDisableSchedulingBodyParam & - RequestParameters; - -export interface ComputeNodeOperationsEnableSchedulingHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsEnableSchedulingQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ComputeNodeOperationsEnableSchedulingQueryParam { - queryParameters?: ComputeNodeOperationsEnableSchedulingQueryParamProperties; -} - -export interface ComputeNodeOperationsEnableSchedulingHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsEnableSchedulingHeaders; -} - -export type ComputeNodeOperationsEnableSchedulingParameters = - ComputeNodeOperationsEnableSchedulingQueryParam & - ComputeNodeOperationsEnableSchedulingHeaderParam & - RequestParameters; - -export interface ComputeNodeOperationsGetRemoteLoginSettingsHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsGetRemoteLoginSettingsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ComputeNodeOperationsGetRemoteLoginSettingsQueryParam { - queryParameters?: ComputeNodeOperationsGetRemoteLoginSettingsQueryParamProperties; -} - -export interface ComputeNodeOperationsGetRemoteLoginSettingsHeaderParam { - headers?: RawHttpHeadersInput & - ComputeNodeOperationsGetRemoteLoginSettingsHeaders; -} - -export type ComputeNodeOperationsGetRemoteLoginSettingsParameters = - ComputeNodeOperationsGetRemoteLoginSettingsQueryParam & - ComputeNodeOperationsGetRemoteLoginSettingsHeaderParam & - RequestParameters; - -export interface ComputeNodeOperationsGetRemoteDesktopHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsGetRemoteDesktopQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; -} - -export interface ComputeNodeOperationsGetRemoteDesktopQueryParam { - queryParameters?: ComputeNodeOperationsGetRemoteDesktopQueryParamProperties; -} - -export interface ComputeNodeOperationsGetRemoteDesktopHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsGetRemoteDesktopHeaders; -} - -export type ComputeNodeOperationsGetRemoteDesktopParameters = - ComputeNodeOperationsGetRemoteDesktopQueryParam & - ComputeNodeOperationsGetRemoteDesktopHeaderParam & - RequestParameters; - -export interface ComputeNodeOperationsUploadBatchServiceLogsHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogsBodyParam { - /** The Azure Batch service log files upload configuration. */ - body: UploadBatchServiceLogsConfiguration; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogsQueryParam { - queryParameters?: ComputeNodeOperationsUploadBatchServiceLogsQueryParamProperties; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogsHeaderParam { - headers?: RawHttpHeadersInput & - ComputeNodeOperationsUploadBatchServiceLogsHeaders; -} - -export type ComputeNodeOperationsUploadBatchServiceLogsParameters = - ComputeNodeOperationsUploadBatchServiceLogsQueryParam & - ComputeNodeOperationsUploadBatchServiceLogsHeaderParam & - ComputeNodeOperationsUploadBatchServiceLogsBodyParam & - RequestParameters; - -export interface ComputeNodeOperationsListHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface ComputeNodeOperationsListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. - */ - $filter: string; - /** An OData $select clause. */ - $select?: string; -} - -export interface ComputeNodeOperationsListQueryParam { - queryParameters: ComputeNodeOperationsListQueryParamProperties; -} - -export interface ComputeNodeOperationsListHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeOperationsListHeaders; -} - -export type ComputeNodeOperationsListParameters = - ComputeNodeOperationsListQueryParam & - ComputeNodeOperationsListHeaderParam & - RequestParameters; - -export interface ComputeNodeExtensionOperationsGetHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface ComputeNodeExtensionOperationsGetQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. 
*/ - $select?: string; -} - -export interface ComputeNodeExtensionOperationsGetQueryParam { - queryParameters?: ComputeNodeExtensionOperationsGetQueryParamProperties; -} - -export interface ComputeNodeExtensionOperationsGetHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeExtensionOperationsGetHeaders; -} - -export type ComputeNodeExtensionOperationsGetParameters = - ComputeNodeExtensionOperationsGetQueryParam & - ComputeNodeExtensionOperationsGetHeaderParam & - RequestParameters; - -export interface ComputeNodeExtensionOperationsListHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface ComputeNodeExtensionOperationsListQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** An OData $select clause. */ - $select?: string; -} - -export interface ComputeNodeExtensionOperationsListQueryParam { - queryParameters?: ComputeNodeExtensionOperationsListQueryParamProperties; -} - -export interface ComputeNodeExtensionOperationsListHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodeExtensionOperationsListHeaders; -} - -export type ComputeNodeExtensionOperationsListParameters = - ComputeNodeExtensionOperationsListQueryParam & - ComputeNodeExtensionOperationsListHeaderParam & - RequestParameters; diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/responses.ts b/packages/typespec-test/test/batch/generated/typespec-ts/src/responses.ts deleted file mode 100644 index 05170629b1..0000000000 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/responses.ts +++ /dev/null @@ -1,2325 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { RawHttpHeaders } from "@azure/core-rest-pipeline"; -import { HttpResponse, ErrorResponse } from "@azure-rest/core-client"; -import { - ApplicationListResultOutput, - ApplicationOutput, - PoolUsageMetricsListOutput, - PoolStatisticsOutput, - BatchPoolListResultOutput, - BatchPoolOutput, - AutoScaleRunOutput, - AccountListSupportedImagesResultOutput, - PoolNodeCountsListResultOutput, - JobStatisticsOutput, - BatchJobOutput, - BatchJobListResultOutput, - BatchJobListPreparationAndReleaseTaskStatusResultOutput, - TaskCountsResultOutput, - CertificateListResultOutput, - CertificateOutput, - NodeFileListResultOutput, - BatchJobScheduleOutput, - BatchJobScheduleListResultOutput, - BatchTaskListResultOutput, - TaskAddCollectionResultOutput, - BatchTaskOutput, - BatchTaskListSubtasksResultOutput, - ComputeNodeOutput, - ComputeNodeGetRemoteLoginSettingsResultOutput, - UploadBatchServiceLogsResultOutput, - ComputeNodeListResultOutput, - NodeVMExtensionOutput, - NodeVMExtensionListOutput, -} from "./outputModels"; - -export interface ApplicationOperationsList200Headers { - /** The client-request-id provided by the client during the request. 
This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface ApplicationOperationsList200Response extends HttpResponse { - status: "200"; - body: ApplicationListResultOutput; - headers: RawHttpHeaders & ApplicationOperationsList200Headers; -} - -export interface ApplicationOperationsListDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ApplicationOperationsListDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ApplicationOperationsListDefaultHeaders; -} - -/** The request has succeeded. */ -export interface ApplicationOperationsGet200Response extends HttpResponse { - status: "200"; - body: ApplicationOutput; -} - -export interface ApplicationOperationsGetDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ApplicationOperationsGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ApplicationOperationsGetDefaultHeaders; -} - -/** The request has succeeded. */ -export interface PoolListUsageMetrics200Response extends HttpResponse { - status: "200"; - body: PoolUsageMetricsListOutput; -} - -export interface PoolListUsageMetricsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolListUsageMetricsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolListUsageMetricsDefaultHeaders; -} - -export interface PoolGetAllLifetimeStatistics200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. 
*/ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface PoolGetAllLifetimeStatistics200Response extends HttpResponse { - status: "200"; - body: PoolStatisticsOutput; - headers: RawHttpHeaders & PoolGetAllLifetimeStatistics200Headers; -} - -export interface PoolGetAllLifetimeStatisticsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolGetAllLifetimeStatisticsDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolGetAllLifetimeStatisticsDefaultHeaders; -} - -export interface PoolAdd201Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded and a new resource has been created as a result. */ -export interface PoolAdd201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & PoolAdd201Headers; -} - -export interface PoolAddDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolAddDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolAddDefaultHeaders; -} - -export interface PoolList200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. 
*/ -export interface PoolList200Response extends HttpResponse { - status: "200"; - body: BatchPoolListResultOutput; - headers: RawHttpHeaders & PoolList200Headers; -} - -export interface PoolListDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolListDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolListDefaultHeaders; -} - -export interface PoolDelete202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The parameters for a widget status request */ -export interface PoolDelete202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & PoolDelete202Headers; -} - -export interface PoolDeleteDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolDeleteDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolDeleteDefaultHeaders; -} - -/** The Pool does not exist. */ -export interface PoolExists404Response extends HttpResponse { - status: "404"; -} - -export interface PoolExistsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolExistsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolExistsDefaultHeaders; -} - -export interface PoolGet200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface PoolGet200Response extends HttpResponse { - status: "200"; - body: BatchPoolOutput; - headers: RawHttpHeaders & PoolGet200Headers; -} - -export interface PoolGetDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface PoolGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolGetDefaultHeaders; -} - -export interface PoolPatch200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolPatch200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & PoolPatch200Headers; -} - -export interface PoolPatchDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolPatchDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolPatchDefaultHeaders; -} - -export interface PoolDisableAutoScale200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolDisableAutoScale200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & PoolDisableAutoScale200Headers; -} - -export interface PoolDisableAutoScaleDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface PoolDisableAutoScaleDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolDisableAutoScaleDefaultHeaders; -} - -export interface PoolEnableAutoScale200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolEnableAutoScale200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & PoolEnableAutoScale200Headers; -} - -export interface PoolEnableAutoScaleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolEnableAutoScaleDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolEnableAutoScaleDefaultHeaders; -} - -export interface PoolEvaluateAutoScale200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolEvaluateAutoScale200Response extends HttpResponse { - status: "200"; - body: AutoScaleRunOutput; - headers: RawHttpHeaders & PoolEvaluateAutoScale200Headers; -} - -export interface PoolEvaluateAutoScaleDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface PoolEvaluateAutoScaleDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolEvaluateAutoScaleDefaultHeaders; -} - -export interface PoolResize200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolResize200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & PoolResize200Headers; -} - -export interface PoolResizeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolResizeDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolResizeDefaultHeaders; -} - -export interface PoolStopResize200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolStopResize200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & PoolStopResize200Headers; -} - -export interface PoolStopResizeDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface PoolStopResizeDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolStopResizeDefaultHeaders; -} - -export interface PoolUpdateProperties200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolUpdateProperties200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & PoolUpdateProperties200Headers; -} - -export interface PoolUpdatePropertiesDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolUpdatePropertiesDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolUpdatePropertiesDefaultHeaders; -} - -export interface PoolRemoveNodes200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface PoolRemoveNodes200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & PoolRemoveNodes200Headers; -} - -export interface PoolRemoveNodesDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface PoolRemoveNodesDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolRemoveNodesDefaultHeaders; -} - -export interface AccountListSupportedImages200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface AccountListSupportedImages200Response extends HttpResponse { - status: "200"; - body: AccountListSupportedImagesResultOutput; - headers: RawHttpHeaders & AccountListSupportedImages200Headers; -} - -export interface AccountListSupportedImagesDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface AccountListSupportedImagesDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & AccountListSupportedImagesDefaultHeaders; -} - -export interface AccountListPoolNodeCounts200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface AccountListPoolNodeCounts200Response extends HttpResponse { - status: "200"; - body: PoolNodeCountsListResultOutput; - headers: RawHttpHeaders & AccountListPoolNodeCounts200Headers; -} - -export interface AccountListPoolNodeCountsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface AccountListPoolNodeCountsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & AccountListPoolNodeCountsDefaultHeaders; -} - -export interface JobGetAllLifetimeStatistics200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. 
If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobGetAllLifetimeStatistics200Response extends HttpResponse { - status: "200"; - body: JobStatisticsOutput; - headers: RawHttpHeaders & JobGetAllLifetimeStatistics200Headers; -} - -export interface JobGetAllLifetimeStatisticsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobGetAllLifetimeStatisticsDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobGetAllLifetimeStatisticsDefaultHeaders; -} - -export interface JobDelete202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The parameters for a widget status request */ -export interface JobDelete202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobDelete202Headers; -} - -export interface JobDeleteDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobDeleteDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobDeleteDefaultHeaders; -} - -export interface JobGet200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. 
*/ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobGet200Response extends HttpResponse { - status: "200"; - body: BatchJobOutput; - headers: RawHttpHeaders & JobGet200Headers; -} - -export interface JobGetDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobGetDefaultHeaders; -} - -export interface JobPatch200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface JobPatch200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & JobPatch200Headers; -} - -export interface JobPatchDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobPatchDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobPatchDefaultHeaders; -} - -export interface JobUpdate200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded. 
*/ -export interface JobUpdate200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & JobUpdate200Headers; -} - -export interface JobUpdateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobUpdateDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobUpdateDefaultHeaders; -} - -export interface JobDisable202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. */ -export interface JobDisable202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobDisable202Headers; -} - -export interface JobDisableDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobDisableDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobDisableDefaultHeaders; -} - -export interface JobEnable202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. 
*/ -export interface JobEnable202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobEnable202Headers; -} - -export interface JobEnableDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobEnableDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobEnableDefaultHeaders; -} - -export interface JobTerminate202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. */ -export interface JobTerminate202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobTerminate202Headers; -} - -export interface JobTerminateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobTerminateDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobTerminateDefaultHeaders; -} - -export interface JobAdd201Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied */ - dataserviceid: string; -} - -/** The request has succeeded and a new resource has been created as a result. 
*/ -export interface JobAdd201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & JobAdd201Headers; -} - -export interface JobAddDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobAddDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobAddDefaultHeaders; -} - -export interface JobList200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobList200Response extends HttpResponse { - status: "200"; - body: BatchJobListResultOutput; - headers: RawHttpHeaders & JobList200Headers; -} - -export interface JobListDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobListDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobListDefaultHeaders; -} - -export interface JobListFromJobSchedule200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobListFromJobSchedule200Response extends HttpResponse { - status: "200"; - body: BatchJobListResultOutput; - headers: RawHttpHeaders & JobListFromJobSchedule200Headers; -} - -export interface JobListFromJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface JobListFromJobScheduleDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobListFromJobScheduleDefaultHeaders; -} - -export interface JobListPreparationAndReleaseTaskStatus200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobListPreparationAndReleaseTaskStatus200Response - extends HttpResponse { - status: "200"; - body: BatchJobListPreparationAndReleaseTaskStatusResultOutput; - headers: RawHttpHeaders & JobListPreparationAndReleaseTaskStatus200Headers; -} - -export interface JobListPreparationAndReleaseTaskStatusDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobListPreparationAndReleaseTaskStatusDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & - JobListPreparationAndReleaseTaskStatusDefaultHeaders; -} - -export interface JobGetTaskCounts200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface JobGetTaskCounts200Response extends HttpResponse { - status: "200"; - body: TaskCountsResultOutput; - headers: RawHttpHeaders & JobGetTaskCounts200Headers; -} - -export interface JobGetTaskCountsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobGetTaskCountsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobGetTaskCountsDefaultHeaders; -} - -export interface CertificateOperationsAdd201Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. 
*/ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded and a new resource has been created as a result. */ -export interface CertificateOperationsAdd201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & CertificateOperationsAdd201Headers; -} - -export interface CertificateOperationsAddDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface CertificateOperationsAddDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificateOperationsAddDefaultHeaders; -} - -export interface CertificateOperationsList200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface CertificateOperationsList200Response extends HttpResponse { - status: "200"; - body: CertificateListResultOutput; - headers: RawHttpHeaders & CertificateOperationsList200Headers; -} - -export interface CertificateOperationsListDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface CertificateOperationsListDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificateOperationsListDefaultHeaders; -} - -export interface CertificateOperationsCancelDeletion204Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. 
If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** There is no content to send for this request, but the headers may be useful. */ -export interface CertificateOperationsCancelDeletion204Response - extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & CertificateOperationsCancelDeletion204Headers; -} - -export interface CertificateOperationsCancelDeletionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface CertificateOperationsCancelDeletionDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificateOperationsCancelDeletionDefaultHeaders; -} - -export interface CertificateOperationsDelete202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. */ -export interface CertificateOperationsDelete202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & CertificateOperationsDelete202Headers; -} - -export interface CertificateOperationsDeleteDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface CertificateOperationsDeleteDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificateOperationsDeleteDefaultHeaders; -} - -export interface CertificateOperationsGet200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. 
If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface CertificateOperationsGet200Response extends HttpResponse { - status: "200"; - body: CertificateOutput; - headers: RawHttpHeaders & CertificateOperationsGet200Headers; -} - -export interface CertificateOperationsGetDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface CertificateOperationsGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificateOperationsGetDefaultHeaders; -} - -export interface FileDeleteFromTask200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface FileDeleteFromTask200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & FileDeleteFromTask200Headers; -} - -export interface FileDeleteFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileDeleteFromTaskDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileDeleteFromTaskDefaultHeaders; -} - -export interface FileGetFromTask200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. 
*/ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; -} - -/** The request has succeeded. */ -export interface FileGetFromTask200Response extends HttpResponse { - status: "200"; - /** Value may contain any sequence of octets */ - body: Uint8Array; - headers: RawHttpHeaders & FileGetFromTask200Headers; -} - -export interface FileGetFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileGetFromTaskDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetFromTaskDefaultHeaders; -} - -export interface FileGetPropertiesFromTask200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; -} - -/** The request has succeeded. */ -export interface FileGetPropertiesFromTask200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & FileGetPropertiesFromTask200Headers; -} - -export interface FileGetPropertiesFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileGetPropertiesFromTaskDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetPropertiesFromTaskDefaultHeaders; -} - -export interface FileDeleteFromComputeNode200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface FileDeleteFromComputeNode200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & FileDeleteFromComputeNode200Headers; -} - -export interface FileDeleteFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileDeleteFromComputeNodeDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileDeleteFromComputeNodeDefaultHeaders; -} - -export interface FileGetFromComputeNode200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; -} - -/** The request has succeeded. */ -export interface FileGetFromComputeNode200Response extends HttpResponse { - status: "200"; - /** Value may contain any sequence of octets */ - body: Uint8Array; - headers: RawHttpHeaders & FileGetFromComputeNode200Headers; -} - -export interface FileGetFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileGetFromComputeNodeDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetFromComputeNodeDefaultHeaders; -} - -export interface FileGetPropertiesFromComputeNode200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. 
*/ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; -} - -/** The request has succeeded. */ -export interface FileGetPropertiesFromComputeNode200Response - extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & FileGetPropertiesFromComputeNode200Headers; -} - -export interface FileGetPropertiesFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileGetPropertiesFromComputeNodeDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetPropertiesFromComputeNodeDefaultHeaders; -} - -export interface FileListFromTask200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface FileListFromTask200Response extends HttpResponse { - status: "200"; - body: NodeFileListResultOutput; - headers: RawHttpHeaders & FileListFromTask200Headers; -} - -export interface FileListFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileListFromTaskDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileListFromTaskDefaultHeaders; -} - -export interface FileListFromComputeNode200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface FileListFromComputeNode200Response extends HttpResponse { - status: "200"; - body: NodeFileListResultOutput; - headers: RawHttpHeaders & FileListFromComputeNode200Headers; -} - -export interface FileListFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface FileListFromComputeNodeDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileListFromComputeNodeDefaultHeaders; -} - -export interface JobScheduleExists200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobScheduleExists200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & JobScheduleExists200Headers; -} - -/** There is no content to send for this request, but the headers may be useful. */ -export interface JobScheduleExists204Response extends HttpResponse { - status: "204"; -} - -export interface JobScheduleExistsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleExistsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleExistsDefaultHeaders; -} - -export interface JobScheduleDelete202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The parameters for a widget status request */ -export interface JobScheduleDelete202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobScheduleDelete202Headers; -} - -export interface JobScheduleDeleteDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleDeleteDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleDeleteDefaultHeaders; -} - -export interface JobScheduleGet200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobScheduleGet200Response extends HttpResponse { - status: "200"; - body: BatchJobScheduleOutput; - headers: RawHttpHeaders & JobScheduleGet200Headers; -} - -export interface JobScheduleGetDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleGetDefaultHeaders; -} - -export interface JobSchedulePatch200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded. 
*/ -export interface JobSchedulePatch200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & JobSchedulePatch200Headers; -} - -export interface JobSchedulePatchDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobSchedulePatchDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobSchedulePatchDefaultHeaders; -} - -export interface JobScheduleUpdate200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface JobScheduleUpdate200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & JobScheduleUpdate200Headers; -} - -export interface JobScheduleUpdateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleUpdateDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleUpdateDefaultHeaders; -} - -export interface JobScheduleDisable204Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** There is no content to send for this request, but the headers may be useful. 
*/ -export interface JobScheduleDisable204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & JobScheduleDisable204Headers; -} - -export interface JobScheduleDisableDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleDisableDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleDisableDefaultHeaders; -} - -export interface JobScheduleEnable204Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** There is no content to send for this request, but the headers may be useful. */ -export interface JobScheduleEnable204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & JobScheduleEnable204Headers; -} - -export interface JobScheduleEnableDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleEnableDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleEnableDefaultHeaders; -} - -export interface JobScheduleTerminate202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. 
*/ -export interface JobScheduleTerminate202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobScheduleTerminate202Headers; -} - -export interface JobScheduleTerminateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleTerminateDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleTerminateDefaultHeaders; -} - -export interface JobScheduleAdd201Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded and a new resource has been created as a result. */ -export interface JobScheduleAdd201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & JobScheduleAdd201Headers; -} - -export interface JobScheduleAddDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleAddDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleAddDefaultHeaders; -} - -export interface JobScheduleList200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. 
*/ -export interface JobScheduleList200Response extends HttpResponse { - status: "200"; - body: BatchJobScheduleListResultOutput; - headers: RawHttpHeaders & JobScheduleList200Headers; -} - -export interface JobScheduleListDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobScheduleListDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleListDefaultHeaders; -} - -export interface TaskAdd201Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded and a new resource has been created as a result. */ -export interface TaskAdd201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & TaskAdd201Headers; -} - -export interface TaskAddDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskAddDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskAddDefaultHeaders; -} - -export interface TaskList200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface TaskList200Response extends HttpResponse { - status: "200"; - body: BatchTaskListResultOutput; - headers: RawHttpHeaders & TaskList200Headers; -} - -export interface TaskListDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface TaskListDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskListDefaultHeaders; -} - -export interface TaskAddCollection200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface TaskAddCollection200Response extends HttpResponse { - status: "200"; - body: TaskAddCollectionResultOutput; - headers: RawHttpHeaders & TaskAddCollection200Headers; -} - -export interface TaskAddCollectionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskAddCollectionDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskAddCollectionDefaultHeaders; -} - -export interface TaskDelete200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface TaskDelete200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & TaskDelete200Headers; -} - -export interface TaskDeleteDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskDeleteDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskDeleteDefaultHeaders; -} - -export interface TaskGet200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. 
In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface TaskGet200Response extends HttpResponse { - status: "200"; - body: BatchTaskOutput; - headers: RawHttpHeaders & TaskGet200Headers; -} - -export interface TaskGetDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskGetDefaultHeaders; -} - -export interface TaskUpdate200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface TaskUpdate200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & TaskUpdate200Headers; -} - -export interface TaskUpdateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskUpdateDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskUpdateDefaultHeaders; -} - -export interface TaskListSubtasks200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. 
*/ -export interface TaskListSubtasks200Response extends HttpResponse { - status: "200"; - body: BatchTaskListSubtasksResultOutput; - headers: RawHttpHeaders & TaskListSubtasks200Headers; -} - -export interface TaskListSubtasksDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskListSubtasksDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskListSubtasksDefaultHeaders; -} - -export interface TaskTerminate204Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** There is no content to send for this request, but the headers may be useful. */ -export interface TaskTerminate204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & TaskTerminate204Headers; -} - -export interface TaskTerminateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskTerminateDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskTerminateDefaultHeaders; -} - -export interface TaskReactivate204Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** There is no content to send for this request, but the headers may be useful. 
*/ -export interface TaskReactivate204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & TaskReactivate204Headers; -} - -export interface TaskReactivateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskReactivateDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskReactivateDefaultHeaders; -} - -export interface ComputeNodeOperationsAddUser201Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded and a new resource has been created as a result. */ -export interface ComputeNodeOperationsAddUser201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & ComputeNodeOperationsAddUser201Headers; -} - -export interface ComputeNodeOperationsAddUserDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsAddUserDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsAddUserDefaultHeaders; -} - -export interface ComputeNodeOperationsDeleteUser200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsDeleteUser200Response - extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & ComputeNodeOperationsDeleteUser200Headers; -} - -export interface ComputeNodeOperationsDeleteUserDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsDeleteUserDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsDeleteUserDefaultHeaders; -} - -export interface ComputeNodeOperationsUpdateUser200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsUpdateUser200Response - extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & ComputeNodeOperationsUpdateUser200Headers; -} - -export interface ComputeNodeOperationsUpdateUserDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsUpdateUserDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsUpdateUserDefaultHeaders; -} - -export interface ComputeNodeOperationsGet200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsGet200Response extends HttpResponse { - status: "200"; - body: ComputeNodeOutput; - headers: RawHttpHeaders & ComputeNodeOperationsGet200Headers; -} - -export interface ComputeNodeOperationsGetDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsGetDefaultHeaders; -} - -export interface ComputeNodeOperationsReboot202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. */ -export interface ComputeNodeOperationsReboot202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & ComputeNodeOperationsReboot202Headers; -} - -export interface ComputeNodeOperationsRebootDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsRebootDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsRebootDefaultHeaders; -} - -export interface ComputeNodeOperationsReimage202Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. 
*/ -export interface ComputeNodeOperationsReimage202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & ComputeNodeOperationsReimage202Headers; -} - -export interface ComputeNodeOperationsReimageDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsReimageDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsReimageDefaultHeaders; -} - -export interface ComputeNodeOperationsDisableScheduling200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsDisableScheduling200Response - extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & ComputeNodeOperationsDisableScheduling200Headers; -} - -export interface ComputeNodeOperationsDisableSchedulingDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsDisableSchedulingDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & - ComputeNodeOperationsDisableSchedulingDefaultHeaders; -} - -export interface ComputeNodeOperationsEnableScheduling200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. 
*/ - dataserviceid: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsEnableScheduling200Response - extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & ComputeNodeOperationsEnableScheduling200Headers; -} - -export interface ComputeNodeOperationsEnableSchedulingDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsEnableSchedulingDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsEnableSchedulingDefaultHeaders; -} - -export interface ComputeNodeOperationsGetRemoteLoginSettings200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsGetRemoteLoginSettings200Response - extends HttpResponse { - status: "200"; - body: ComputeNodeGetRemoteLoginSettingsResultOutput; - headers: RawHttpHeaders & - ComputeNodeOperationsGetRemoteLoginSettings200Headers; -} - -export interface ComputeNodeOperationsGetRemoteLoginSettingsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsGetRemoteLoginSettingsDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & - ComputeNodeOperationsGetRemoteLoginSettingsDefaultHeaders; -} - -export interface ComputeNodeOperationsGetRemoteDesktop200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. 
*/ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsGetRemoteDesktop200Response - extends HttpResponse { - status: "200"; - /** Value may contain any sequence of octets */ - body: Uint8Array; - headers: RawHttpHeaders & ComputeNodeOperationsGetRemoteDesktop200Headers; -} - -export interface ComputeNodeOperationsGetRemoteDesktopDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsGetRemoteDesktopDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsGetRemoteDesktopDefaultHeaders; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogs200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeOperationsUploadBatchServiceLogs200Response - extends HttpResponse { - status: "200"; - body: UploadBatchServiceLogsResultOutput; - headers: RawHttpHeaders & - ComputeNodeOperationsUploadBatchServiceLogs200Headers; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsUploadBatchServiceLogsDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & - ComputeNodeOperationsUploadBatchServiceLogsDefaultHeaders; -} - -export interface ComputeNodeOperationsList200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. 
*/ -export interface ComputeNodeOperationsList200Response extends HttpResponse { - status: "200"; - body: ComputeNodeListResultOutput; - headers: RawHttpHeaders & ComputeNodeOperationsList200Headers; -} - -export interface ComputeNodeOperationsListDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeOperationsListDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeOperationsListDefaultHeaders; -} - -export interface ComputeNodeExtensionOperationsGet200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface ComputeNodeExtensionOperationsGet200Response - extends HttpResponse { - status: "200"; - body: NodeVMExtensionOutput; - headers: RawHttpHeaders & ComputeNodeExtensionOperationsGet200Headers; -} - -export interface ComputeNodeExtensionOperationsGetDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeExtensionOperationsGetDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeExtensionOperationsGetDefaultHeaders; -} - -export interface ComputeNodeExtensionOperationsList200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. 
*/ -export interface ComputeNodeExtensionOperationsList200Response - extends HttpResponse { - status: "200"; - body: NodeVMExtensionListOutput; - headers: RawHttpHeaders & ComputeNodeExtensionOperationsList200Headers; -} - -export interface ComputeNodeExtensionOperationsListDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodeExtensionOperationsListDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodeExtensionOperationsListDefaultHeaders; -} diff --git a/packages/typespec-test/test/batch/skip b/packages/typespec-test/test/batch/skip deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/packages/typespec-test/test/batch/spec/main.tsp b/packages/typespec-test/test/batch/spec/main.tsp deleted file mode 100644 index 7b92fc9fcd..0000000000 --- a/packages/typespec-test/test/batch/spec/main.tsp +++ /dev/null @@ -1,33 +0,0 @@ -import "@typespec/rest"; -import "@typespec/versioning"; -import "@azure-tools/typespec-azure-core"; -import "@azure-tools/typespec-autorest"; -import "@typespec/openapi"; -import "./routes.tsp"; - -using TypeSpec.Reflection; -using TypeSpec.Http; -using TypeSpec.Rest; -using Autorest; -using TypeSpec.Versioning; -using Azure.Core; -using OpenAPI; - -@service({ - title: "BatchServiceClient", - version: "2022-10-01.16.0", -}) -@useAuth(AADToken) -@useDependency(Azure.Core.Versions.v1_0_Preview_1) -@doc("A client for issuing REST requests to the Azure Batch service.") -namespace BatchServiceClient; - -@doc("The Azure Active Directory OAuth2 Flow") -model AADToken - is OAuth2Auth<[ - { - type: OAuth2FlowType.implicit; - authorizationUrl: "https://login.microsoftonline.com/common/oauth2/authorize"; - scopes: ["user_impersonation"]; - } - ]>; diff --git a/packages/typespec-test/test/batch/spec/models.tsp b/packages/typespec-test/test/batch/spec/models.tsp deleted file mode 100644 index 405f0ba6b5..0000000000 --- a/packages/typespec-test/test/batch/spec/models.tsp +++ /dev/null @@ -1,5105 +0,0 @@ -import "@typespec/rest"; -import "@azure-tools/typespec-azure-core"; - -using TypeSpec.Reflection; -using TypeSpec.Http; -using TypeSpec.Rest; -using Autorest; -using TypeSpec.Versioning; -using Azure.Core; -using OpenAPI; - -namespace BatchServiceClient; - -enum OSType { - @doc("The Linux operating system.") - Linux: "linux", - - @doc("The Windows operating system.") - Windows: "windows", -} - -enum VerificationType { - @doc("The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all Batch features have been confirmed to work as expected.") - Verified: "verified", - - @doc("The associated Compute Node agent SKU should have binary compatibility with the Image, but specific functionality has not been verified.") - Unverified: "unverified", -} - -enum CertificateState { - @doc("The Certificate is available for use in Pools.") - Active: "active", - - @doc("The user has requested that the Certificate be deleted, but the delete operation has not yet completed. You may not reference the Certificate when creating or updating Pools.") - Deleting: "deleting", - - @doc("The user requested that the Certificate be deleted, but there are Pools that still have references to the Certificate, or it is still installed on one or more Nodes. (The latter can occur if the Certificate has been removed from the Pool, but the Compute Node has not yet restarted. 
Compute Nodes refresh their Certificates only when they restart.) You may use the cancel Certificate delete operation to cancel the delete, or the delete Certificate operation to retry the delete.") - DeleteFailed: "deletefailed", -} - -enum CertificateFormat { - @doc("The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain.") - Pfx: "pfx", - - @doc("The Certificate is a base64-encoded X.509 Certificate.") - Cer: "cer", -} - -enum JobScheduleState { - @doc("The Job Schedule is active and will create Jobs as per its schedule.") - Active: "active", - - @doc("The Job Schedule has terminated, either by reaching its end time or by the user terminating it explicitly.") - Completed: "completed", - - @doc("The user has disabled the Job Schedule. The scheduler will not initiate any new Jobs will on this schedule, but any existing active Job will continue to run.") - Disabled: "disabled", - - @doc("The Job Schedule has no more work to do, or has been explicitly terminated by the user, but the termination operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, nor is any existing Job active.") - Terminating: "terminating", - - @doc("The user has requested that the Job Schedule be deleted, but the delete operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, and will delete any existing Jobs and Tasks under the Job Schedule, including any active Job. The Job Schedule will be deleted when all Jobs and Tasks under the Job Schedule have been deleted.") - Deleting: "deleting", -} - -enum OnAllTasksComplete { - @doc("Do nothing. The Job remains active unless terminated or disabled by some other means.") - NoAction: "noaction", - - @doc("") - TerminateJob: "terminatejob", -} - -enum OnTaskFailure { - @doc("Do nothing. The Job remains active unless terminated or disabled by some other means.") - NoAction: "noaction", - - @doc("Terminate the Job. The Job's terminateReason is set to 'AllTasksComplete'.") - PerformExitOptionsJobAction: "performexitoptionsjobaction", -} - -enum ContainerWorkingDirectory { - @doc("Use the standard Batch service Task working directory, which will contain the Task Resource Files populated by Batch.") - TaskWorkingDirectory: "taskWorkingDirectory", - - @doc("Use the working directory defined in the container Image. Beware that this directory will not contain the Resource Files downloaded by Batch.") - ContainerImageDefault: "containerImageDefault", -} - -enum OutputFileUploadCondition { - @doc("Upload the file(s) only after the Task process exits with an exit code of 0.") - TaskSuccess: "tasksuccess", - - @doc("Upload the file(s) only after the Task process exits with a nonzero exit code.") - TaskFailure: "taskfailure", - - @doc("Upload the file(s) after the Task process exits, no matter what the exit code was.") - TaskCompletion: "taskcompletion", -} - -enum AutoUserScope { - @doc("Specifies that the service should create a new user for the Task.") - Task: "task", - - @doc("Specifies that the Task runs as the common auto user Account which is created on every Compute Node in a Pool.") - Pool: "pool", -} - -enum ElevationLevel { - @doc("The user is a standard user without elevated access.") - NonAdmin: "nonadmin", - - @doc("The user is a user with elevated access and operates with full Administrator permissions.") - Admin: "admin", -} - -enum PoolLifetimeOption { - @doc("The Pool exists for the lifetime of the Job Schedule. 
The Batch Service creates the Pool when it creates the first Job on the schedule. You may apply this option only to Job Schedules, not to Jobs.") - JobSchedule: "jobschedule", - - @doc("The Pool exists for the lifetime of the Job to which it is dedicated. The Batch service creates the Pool when it creates the Job. If the 'job' option is applied to a Job Schedule, the Batch service creates a new auto Pool for every Job created on the schedule.") - Job: "job", -} - -enum CachingType { - @doc("The caching mode for the disk is not enabled.") - None: "none", - - @doc("The caching mode for the disk is read only.") - ReadOnly: "readonly", - - @doc("The caching mode for the disk is read and write.") - ReadWrite: "readwrite", -} - -enum StorageAccountType { - @doc("The data disk should use standard locally redundant storage.") - StandardLRS: "standard_lrs", - - @doc("The data disk should use premium locally redundant storage.") - PremiumLRS: "premium_lrs", -} - -enum DiskEncryptionTarget { - @doc("The OS Disk on the compute node is encrypted.") - OsDisk: "osdisk", - - @doc("The temporary disk on the compute node is encrypted. On Linux this encryption applies to other partitions (such as those on mounted data disks) when encryption occurs at boot time.") - TemporaryDisk: "temporarydisk", -} - -enum NodePlacementPolicyType { - @doc("All nodes in the pool will be allocated in the same region.") - Regional: "regional", - - @doc("Nodes in the pool will be spread across different availability zones with best effort balancing.") - Zonal: "zonal", -} - -enum ComputeNodeFillType { - @doc("Tasks should be assigned evenly across all Compute Nodes in the Pool.") - Spread: "spread", - - @doc("As many Tasks as possible (taskSlotsPerNode) should be assigned to each Compute Node in the Pool before any Tasks are assigned to the next Compute Node in the Pool.") - Pack: "pack", -} - -enum DynamicVNetAssignmentScope { - @doc("No dynamic VNet assignment is enabled.") - None: "none", - - @doc("Dynamic VNet assignment is done per-job.") - Job: "job", -} - -enum InboundEndpointProtocol { - @doc("Use TCP for the endpoint.") - Tcp: "tcp", - - @doc("Use UDP for the endpoint.") - Udp: "udp", -} - -enum NetworkSecurityGroupRuleAccess { - @doc("Allow access.") - Allow: "allow", - - @doc("Deny access.") - Deny: "deny", -} - -enum IPAddressProvisioningType { - @doc("A public IP will be created and managed by Batch. There may be multiple public IPs depending on the size of the Pool.") - BatchManaged: "batchmanaged", - - @doc("Public IPs are provided by the user and will be used to provision the Compute Nodes.") - UserManaged: "usermanaged", - - @doc("No public IP Address will be created.") - NoPublicIPAddresses: "nopublicipaddresses", -} - -enum DiffDiskPlacement { - @doc("The Ephemeral OS Disk is stored on the VM cache.") - CacheDisk: "cachedisk", -} - -enum ContainerType { - @doc("A Docker compatible container technology will be used to launch the containers.") - DockerCompatible: "dockerCompatible", -} -enum AccessScope { - @doc("Grants access to perform all operations on the Job containing the Task.") - Job: "job", -} - -enum CertificateStoreLocation { - @doc("Certificates should be installed to the CurrentUser Certificate store.") - CurrentUser: "currentuser", - - @doc("Certificates should be installed to the LocalMachine Certificate store.") - LocalMachine: "localmachine", -} - -enum CertificateVisibility { - @doc("The Certificate should be visible to the user account under which the StartTask is run. 
Note that if AutoUser Scope is Pool for both the StartTask and a Task, this certificate will be visible to the Task as well.") - StartTask: "starttask", - - @doc("The Certificate should be visible to the user accounts under which Job Tasks are run.") - Task: "task", - - @doc("The Certificate should be visible to the user accounts under which users remotely access the Compute Node.") - RemoteUser: "remoteuser", -} - -enum LoginMode { - @doc("The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes.") - Batch: "batch", - - @doc("The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows VirtualMachineConfiguration Pools. If this option is used with an elevated user identity in a Windows VirtualMachineConfiguration Pool, the user session will not be elevated unless the application executed by the Task command line is configured to always require administrative privilege or to always require maximum privilege.") - Interactive: "interactive", -} - -enum NodeCommunicationMode { - @doc("The node communication mode is automatically set by the Batch service.") - Default: "default", - - @doc("Nodes using the classic communication mode require inbound TCP communication on ports 29876 and 29877 from the \"BatchNodeManagement.{region}\" service tag and outbound TCP communication on port 443 to the \"Storage.region\" and \"BatchNodeManagement.{region}\" service tags.") - Classic: "classic", - - @doc("Nodes using the simplified communication mode require outbound TCP communication on port 443 to the \"BatchNodeManagement.{region}\" service tag. No open inbound ports are required.") - Simplified: "simplified", -} - -enum JobState { - @doc("The Job is available to have Tasks scheduled.") - Active: "active", - - @doc("A user has requested that the Job be disabled, but the disable operation is still in progress (for example, waiting for Tasks to terminate).") - Disabling: "disabling", - - @doc("A user has disabled the Job. No Tasks are running, and no new Tasks will be scheduled.") - Disabled: "disabled", - - @doc("A user has requested that the Job be enabled, but the enable operation is still in progress.") - Enabling: "enabling", - - @doc("The Job is about to complete, either because a Job Manager Task has completed or because the user has terminated the Job, but the terminate operation is still in progress (for example, because Job Release Tasks are running).") - Terminating: "terminating", - - @doc("All Tasks have terminated, and the system will not accept any more Tasks or any further changes to the Job.") - Completed: "completed", - - @doc("A user has requested that the Job be deleted, but the delete operation is still in progress (for example, because the system is still terminating running Tasks).") - Deleting: "deleting", -} - -enum ErrorCategory { - @doc("The error is due to a user issue, such as misconfiguration.") - UserError: "usererror", - - @doc("The error is due to an internal server issue.") - ServerError: "servererror", -} - -enum DisableJobOption { - @doc("Terminate running Tasks and requeue them. The Tasks will run again when the Job is enabled.") - Requeue: "requeue", - - @doc("Terminate running Tasks. 
The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again.") - Terminate: "terminate", - - @doc("Allow currently running Tasks to complete.") - Wait: "wait", -} - -enum JobPreparationTaskState { - @doc("The Task is currently running (including retrying).") - Running: "running", - - @doc("The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures).") - Completed: "completed", -} - -enum TaskExecutionResult { - @doc("The Task ran successfully.") - Success: "success", - - @doc("There was an error during processing of the Task. The failure may have occurred before the Task process was launched, while the Task process was executing, or after the Task process exited.") - Failure: "failure", -} - -enum JobReleaseTaskState { - @doc("The Task is currently running (including retrying).") - Running: "running", - - @doc("The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures).") - Completed: "completed", -} - -enum PoolState { - @doc("The Pool is available to run Tasks subject to the availability of Compute Nodes.") - Active: "active", - - @doc("The user has requested that the Pool be deleted, but the delete operation has not yet completed.") - Deleting: "deleting", -} - -enum AllocationState { - @doc("The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in progress. A Pool enters this state when it is created and when no operations are being performed on the Pool to change the number of Compute Nodes.") - Steady: "steady", - - @doc("The Pool is resizing; that is, Compute Nodes are being added to or removed from the Pool.") - Resizing: "resizing", - - @doc("The Pool was resizing, but the user has requested that the resize be stopped, but the stop request has not yet been completed.") - Stopping: "stopping", -} - -enum PoolIdentityType { - @doc("Batch pool has user assigned identities with it.") - "UserAssigned", - - @doc("Batch pool has no identity associated with it. Setting `None` in update pool will remove existing identities.") - "None", -} - -enum ComputeNodeDeallocationOption { - @doc("Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Remove Compute Nodes as soon as Tasks have been terminated.") - Requeue: "requeue", - - @doc("Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Remove Compute Nodes as soon as Tasks have been terminated.") - Terminate: "terminate", - - @doc("Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Remove Compute Nodes when all Tasks have completed.") - TaskCompletion: "taskcompletion", - - @doc("Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Remove Compute Nodes when all Task retention periods have expired.") - RetainedData: "retaineddata", -} - -enum JobAction { - @doc("Take no action.") - None: "none", - - @doc("Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks value of requeue.") - Disable: "disable", - - @doc("Terminate the Job. 
The terminateReason in the Job's executionInfo is set to \"TaskFailed\".") - Terminate: "terminate", -} - -enum DependencyAction { - @doc("Satisfy tasks waiting on this task; once all dependencies are satisfied, the task will be scheduled to run.") - Satisfy: "satisfy", - - @doc("Blocks tasks waiting on this task, preventing them from being scheduled.") - Block: "block", -} - -enum TaskState { - @doc("The Task is queued and able to run, but is not currently assigned to a Compute Node. A Task enters this state when it is created, when it is enabled after being disabled, or when it is awaiting a retry after a failed run.") - Active: "active", - - @doc("The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node.") - Preparing: "preparing", - - @doc("The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing.") - Running: "running", - - @doc("The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated.") - Completed: "completed", -} - -enum TaskAddStatus { - @doc("The Task was added successfully.") - Success: "success", - - @doc("The Task failed to add due to a client error and should not be retried without modifying the request as appropriate.") - ClientError: "clienterror", - - @doc("Task failed to add due to a server error and can be retried without modification.") - ServerError: "servererror", -} - -enum SubtaskState { - @doc("The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node.") - Preparing: "preparing", - - @doc("The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing.") - Running: "running", - - @doc("The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. 
A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated.") - Completed: "completed", -} - -enum ComputeNodeState { - @doc("The Compute Node is not currently running a Task.") - Idle: "idle", - - @doc("The Compute Node is rebooting.") - Rebooting: "rebooting", - - @doc("The Compute Node is reimaging.") - Reimaging: "reimaging", - - @doc("The Compute Node is running one or more Tasks (other than a StartTask).") - Running: "running", - - @doc("The Compute Node cannot be used for Task execution due to errors.") - Unusable: "unusable", - - @doc("The Batch service has obtained the underlying virtual machine from Azure Compute, but it has not yet started to join the Pool.") - Creating: "creating", - - @doc("The Batch service is starting on the underlying virtual machine.") - Starting: "starting", - - @doc("The StartTask has started running on the Compute Node, but waitForSuccess is set and the StartTask has not yet completed.") - WaitingForStartTask: "waitingforstarttask", - - @doc("The StartTask has failed on the Compute Node (and exhausted all retries), and waitForSuccess is set. The Compute Node is not usable for running Tasks.") - StartTaskFailed: "starttaskfailed", - - @doc("The Batch service has lost contact with the Compute Node, and does not know its true state.") - Unknown: "unknown", - - @doc("The Compute Node is leaving the Pool, either because the user explicitly removed it or because the Pool is resizing or autoscaling down.") - LeavingPool: "leavingpool", - - @doc("The Compute Node is not currently running a Task, and scheduling of new Tasks to the Compute Node is disabled.") - Offline: "offline", - - @doc("The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available.") - Preempted: "preempted", -} - -enum SchedulingState { - @doc("Tasks can be scheduled on the Compute Node.") - Enabled: "enabled", - - @doc("No new Tasks will be scheduled on the Compute Node. Tasks already running on the Compute Node may still run to completion. All Compute Nodes start with scheduling enabled.") - Disabled: "disabled", -} - -enum StartTaskState { - @doc("The StartTask is currently running.") - Running: "running", - - @doc("The StartTask has exited with exit code 0, or the StartTask has failed and the retry limit has reached, or the StartTask process did not run due to Task preparation errors (such as resource file download failures).") - Completed: "completed", -} - -enum ComputeNodeRebootOption { - @doc("Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Restart the Compute Node as soon as Tasks have been terminated.") - Requeue: "requeue", - - @doc("Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Restart the Compute Node as soon as Tasks have been terminated.") - Terminate: "terminate", - - @doc("Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Restart the Compute Node when all Tasks have completed.") - TaskCompletion: "taskcompletion", - - @doc("Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. 
Restart the Compute Node when all Task retention periods have expired.") - RetainedData: "retaineddata", -} - -enum ComputeNodeReimageOption { - @doc("Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Reimage the Compute Node as soon as Tasks have been terminated.") - Requeue: "requeue", - - @doc("Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Reimage the Compute Node as soon as Tasks have been terminated.") - Terminate: "terminate", - - @doc("Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Reimage the Compute Node when all Tasks have completed.") - TaskCompletion: "taskcompletion", - - @doc("Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Reimage the Compute Node when all Task retention periods have expired.") - RetainedData: "retaineddata", -} - -enum DisableComputeNodeSchedulingOption { - @doc("Terminate running Task processes and requeue the Tasks. The Tasks may run again on other Compute Nodes, or when Task scheduling is re-enabled on this Compute Node. Enter offline state as soon as Tasks have been terminated.") - Requeue: "requeue", - - @doc("Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as Tasks have been terminated.") - Terminate: "terminate", - - @doc("Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Enter offline state when all Tasks have completed.") - TaskCompletion: "taskcompletion", -} - -enum StatusLevelTypes { - @doc("") - "Error", - - @doc("") - "Info", - - @doc("") - "Warning", -} - -@doc("The result of listing the applications available in an Account.") -@pagedResult -model ApplicationListResult { - @doc("The list of applications available in the Account.") - @items - value?: Application[]; - - #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("Contains information about an application in an Azure Batch Account.") -@resource("applications") -model Application { - @doc("A string that uniquely identifies the application within the Account.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key("applicationId") - id: string; - - @doc("The display name for the application.") - displayName: string; - - @doc("The list of available versions of the application.") - versions: string[]; -} - -@doc("An error response received from the Azure Batch service.") -@error -model BatchError { - @doc(""" -An identifier for the error. Codes are invariant and are intended to be -consumed programmatically. 
-""") - code?: string; - - @doc("An error message received in an Azure Batch error response.") - message?: ErrorMessage; - - @doc("A collection of key-value pairs containing additional details about the error.") - values?: BatchErrorDetail[]; -} - -@doc("An error message received in an Azure Batch error response.") -model ErrorMessage { - @doc("The language code of the error message") - lang?: string; - - @doc("The text of the message.") - value?: string; -} - -@doc("An item of additional information included in an Azure Batch error response.") -model BatchErrorDetail { - @doc("An identifier specifying the meaning of the Value property.") - key?: string; - - @doc("The additional information included with the error response.") - value?: string; -} - -@doc("The result of a listing the usage metrics for an Account.") -@pagedResult -model PoolListUsageMetricsResult { - @doc("The Pool usage metrics data.") - @items - value?: PoolUsageMetrics[]; - - #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("Usage metrics for a Pool across an aggregation interval.") -@resource("poolusagemetrics") -model PoolUsageMetrics { - @doc("The ID of the Pool whose metrics are aggregated in this entry.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - poolId: string; - - @doc("The start time of the aggregation interval covered by this entry.") - startTime: utcDateTime; - - @doc("The end time of the aggregation interval covered by this entry.") - endTime: utcDateTime; - - @doc(""" -For information about available sizes of virtual machines in Pools, see Choose -a VM size for Compute Nodes in an Azure Batch Pool -(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). -""") - vmSize: string; - - @doc("The total core hours used in the Pool during this aggregation interval.") - totalCoreHours: float32; -} - -@doc("The result of listing the supported Virtual Machine Images.") -@pagedResult -model AccountListSupportedImagesResult { - @doc("The list of supported Virtual Machine Images.") - @items - value?: ImageInformation[]; - - #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc(""" -A reference to the Azure Virtual Machines Marketplace Image and additional -information about the Image. -""") -@resource("supportedimages") -model ImageInformation { - #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" - @doc("The ID of the Compute Node agent SKU which the Image supports.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - nodeAgentSKUId: string; - - @doc(""" -A reference to an Azure Virtual Machines Marketplace Image or a Shared Image -Gallery Image. To get the list of all Azure Marketplace Image references -verified by Azure Batch, see the 'List Supported Images' operation. -""") - imageReference: ImageReference; - - @doc("The type of operating system (e.g. Windows or Linux) of the Image.") - osType: OSType; - - @doc(""" -Not every capability of the Image is listed. 
Capabilities in this list are -considered of special interest and are generally related to integration with -other features in the Azure Batch service. -""") - capabilities?: string[]; - - @doc(""" -The time when the Azure Batch service will stop accepting create Pool requests -for the Image. -""") - batchSupportEndOfLife?: utcDateTime; - - @doc(""" -Whether the Azure Batch service actively verifies that the Image is compatible -with the associated Compute Node agent SKU. -""") - verificationType: VerificationType; -} - -@doc(""" -A reference to an Azure Virtual Machines Marketplace Image or a Shared Image -Gallery Image. To get the list of all Azure Marketplace Image references -verified by Azure Batch, see the 'List Supported Images' operation. -""") -model ImageReference { - @doc("For example, Canonical or MicrosoftWindowsServer.") - publisher?: string; - - @doc("For example, UbuntuServer or WindowsServer.") - offer?: string; - - @doc("For example, 18.04-LTS or 2019-Datacenter.") - sku?: string; - - @doc(""" -A value of 'latest' can be specified to select the latest version of an Image. -If omitted, the default is 'latest'. -""") - version?: string; - - @doc(""" -This property is mutually exclusive with other ImageReference properties. The -Shared Image Gallery Image must have replicas in the same region and must be in -the same subscription as the Azure Batch account. If the image version is not -specified in the imageId, the latest version will be used. For information -about the firewall settings for the Batch Compute Node agent to communicate -with the Batch service see -https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. -""") - virtualMachineImageId?: string; - - @doc(""" -The specific version of the platform image or marketplace image used to create -the node. This read-only field differs from 'version' only if the value -specified for 'version' when the pool was created was 'latest'. 
-""") - @visibility("read") - exactVersion?: string; -} - -@doc("The result of listing the Compute Node counts in the Account.") -@pagedResult -model PoolNodeCountsListResult { - @doc("A list of Compute Node counts by Pool.") - @items - value?: PoolNodeCounts[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("The number of Compute Nodes in each state for a Pool.") -@resource("nodecounts") -model PoolNodeCounts { - @doc("The ID of the Pool.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - poolId: string; - - @doc("The number of Compute Nodes in each Compute Node state.") - dedicated?: NodeCounts; - - @doc("The number of Compute Nodes in each Compute Node state.") - lowPriority?: NodeCounts; -} - -@doc("The number of Compute Nodes in each Compute Node state.") -model NodeCounts { - @doc("The number of Compute Nodes in the creating state.") - creating: int32; - - @doc("The number of Compute Nodes in the idle state.") - idle: int32; - - @doc("The number of Compute Nodes in the offline state.") - offline: int32; - - @doc("The number of Compute Nodes in the preempted state.") - preempted: int32; - - @doc("The count of Compute Nodes in the rebooting state.") - rebooting: int32; - - @doc("The number of Compute Nodes in the reimaging state.") - reimaging: int32; - - @doc("The number of Compute Nodes in the running state.") - running: int32; - - @doc("The number of Compute Nodes in the starting state.") - starting: int32; - - @doc("The number of Compute Nodes in the startTaskFailed state.") - startTaskFailed: int32; - - @doc("The number of Compute Nodes in the leavingPool state.") - leavingPool: int32; - - @doc("The number of Compute Nodes in the unknown state.") - "unknown": int32; - - @doc("The number of Compute Nodes in the unusable state.") - unusable: int32; - - @doc("The number of Compute Nodes in the waitingForStartTask state.") - waitingForStartTask: int32; - - @doc("The total number of Compute Nodes.") - total: int32; -} - -@doc("Contains utilization and resource usage statistics for the lifetime of a Pool.") -model PoolStatistics { - @doc("The URL for the statistics.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - url: string; - - @doc("The start time of the time range covered by the statistics.") - startTime: utcDateTime; - - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") - lastUpdateTime: utcDateTime; - - @doc("Statistics related to Pool usage information.") - usageStats?: UsageStatistics; - - @doc("Statistics related to resource consumption by Compute Nodes in a Pool.") - resourceStats?: ResourceStatistics; -} - -@doc("Statistics related to Pool usage information.") -model UsageStatistics { - @doc("The start time of the time range covered by the statistics.") - startTime: utcDateTime; - - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") - lastUpdateTime: utcDateTime; - - @doc(""" -The aggregated wall-clock time of the dedicated Compute Node cores being part -of the Pool. 
-""") - dedicatedCoreTime: duration; -} - -@doc("Statistics related to resource consumption by Compute Nodes in a Pool.") -model ResourceStatistics { - @doc("The start time of the time range covered by the statistics.") - startTime: utcDateTime; - - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") - lastUpdateTime: utcDateTime; - - @doc(""" -The average CPU usage across all Compute Nodes in the Pool (percentage per -node). -""") - avgCPUPercentage: float32; - - @doc("The average memory usage in GiB across all Compute Nodes in the Pool.") - avgMemoryGiB: float32; - - @doc("The peak memory usage in GiB across all Compute Nodes in the Pool.") - peakMemoryGiB: float32; - - @doc("The average used disk space in GiB across all Compute Nodes in the Pool.") - avgDiskGiB: float32; - - @doc("The peak used disk space in GiB across all Compute Nodes in the Pool.") - peakDiskGiB: float32; - - @doc("The total number of disk read operations across all Compute Nodes in the Pool.") - diskReadIOps: int32; - - @doc("The total number of disk write operations across all Compute Nodes in the Pool.") - diskWriteIOps: int32; - - @doc(""" -The total amount of data in GiB of disk reads across all Compute Nodes in the -Pool. -""") - diskReadGiB: float32; - - @doc(""" -The total amount of data in GiB of disk writes across all Compute Nodes in the -Pool. -""") - diskWriteGiB: float32; - - @doc(""" -The total amount of data in GiB of network reads across all Compute Nodes in -the Pool. -""") - networkReadGiB: float32; - - @doc(""" -The total amount of data in GiB of network writes across all Compute Nodes in -the Pool. -""") - networkWriteGiB: float32; -} - -@doc("Resource usage statistics for a Job.") -model JobStatistics { - @doc("The URL of the statistics.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - url: string; - - @doc("The start time of the time range covered by the statistics.") - startTime: utcDateTime; - - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") - lastUpdateTime: utcDateTime; - - @doc(""" -The total user mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in the Job. -""") - userCPUTime: duration; - - @doc(""" -The total kernel mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in the Job. -""") - kernelCPUTime: duration; - - @doc(""" - The wall clock time is the elapsed time from when the Task started running on -a Compute Node to when it finished (or to the last time the statistics were -updated, if the Task had not finished by then). If a Task was retried, this -includes the wall clock time of all the Task retries. 
-""") - wallClockTime: duration; - - @doc("The total number of disk read operations made by all Tasks in the Job.") - readIOps: int32; - - @doc("The total number of disk write operations made by all Tasks in the Job.") - writeIOps: int32; - - @doc("The total amount of data in GiB read from disk by all Tasks in the Job.") - readIOGiB: float32; - - @doc("The total amount of data in GiB written to disk by all Tasks in the Job.") - writeIOGiB: float32; - - @doc("A Task completes successfully if it returns exit code 0.") - numSucceededTasks: int32; - - @doc(""" -A Task fails if it exhausts its maximum retry count without returning exit code -0. -""") - numFailedTasks: int32; - - @doc(""" -The total number of retries on all the Tasks in the Job during the given time -range. -""") - numTaskRetries: int32; - - @doc(""" -The wait time for a Task is defined as the elapsed time between the creation of -the Task and the start of Task execution. (If the Task is retried due to -failures, the wait time is the time to the most recent Task execution.) This -value is only reported in the Account lifetime statistics; it is not included -in the Job statistics. -""") - waitTime: duration; -} - -@doc(""" -A Certificate that can be installed on Compute Nodes and can be used to -authenticate operations on the machine. -""") -model Certificate { - @doc(""" -The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex -digits. -""") - thumbprint?: string; - - @doc("The algorithm used to derive the thumbprint.") - thumbprintAlgorithm?: string; - - @doc("The URL of the Certificate.") - @visibility("read") - url?: string; - - @doc("The state of the Certificate.") - @visibility("read") - state?: CertificateState; - - @doc("The time at which the Certificate entered its current state.") - @visibility("read") - stateTransitionTime?: utcDateTime; - - @doc("This property is not set if the Certificate is in its initial active state.") - @visibility("read") - previousState?: CertificateState; - - @doc("This property is not set if the Certificate is in its initial Active state.") - @visibility("read") - previousStateTransitionTime?: utcDateTime; - - @doc("The public part of the Certificate as a base-64 encoded .cer file.") - @visibility("read") - publicData?: string; - - @doc("This property is set only if the Certificate is in the DeleteFailed state.") - @visibility("read") - deleteCertificateError?: DeleteCertificateError; - - @doc("The base64-encoded contents of the Certificate. The maximum size is 10KB.") - data?: string; - - @doc("The format of the Certificate data.") - certificateFormat?: CertificateFormat; - - @doc("This must be omitted if the Certificate format is cer.") - password?: string; -} - -@doc("An error encountered by the Batch service when deleting a Certificate.") -model DeleteCertificateError { - @doc(""" -An identifier for the Certificate deletion error. Codes are invariant and are -intended to be consumed programmatically. -""") - code?: string; - - @doc(""" -A message describing the Certificate deletion error, intended to be suitable -for display in a user interface. -""") - message?: string; - - @doc(""" -This list includes details such as the active Pools and Compute Nodes -referencing this Certificate. However, if a large number of resources reference -the Certificate, the list contains only about the first hundred. 
-""") - values?: NameValuePair[]; -} - -@doc("Represents a name-value pair.") -model NameValuePair { - @doc("The name in the name-value pair.") - name?: string; - - @doc("The value in the name-value pair.") - value?: string; -} - -@doc("The result of listing the Certificates in the Account.") -@pagedResult -model CertificateListResult { - @doc("The list of Certificates.") - @items - value?: Certificate[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc(""" -The result of listing the files on a Compute Node, or the files associated with -a Task on a Compute Node. -""") -@pagedResult -model NodeFileListResult { - @doc("The list of files.") - @items - value?: NodeFile[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("Information about a file or directory on a Compute Node.") -model NodeFile { - @doc("The file path.") - name?: string; - - @doc("The URL of the file.") - url?: string; - - @doc("Whether the object represents a directory.") - isDirectory?: boolean; - - @doc("The properties of a file on a Compute Node.") - properties?: FileProperties; -} - -@doc("The properties of a file on a Compute Node.") -model FileProperties { - @doc("The creation time is not returned for files on Linux Compute Nodes.") - creationTime?: utcDateTime; - - @doc("The time at which the file was last modified.") - lastModified: utcDateTime; - - @doc("The length of the file.") - contentLength: int32; - - @doc("The content type of the file.") - contentType?: string; - - @doc("The file mode is returned only for files on Linux Compute Nodes.") - fileMode?: string; -} - -@doc(""" -A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a -specification used to create each Job. -""") -model BatchJobSchedule { - @doc("A string that uniquely identifies the schedule within the Account.") - @visibility("read", "create") - id?: string; - - @doc("The display name for the schedule.") - @visibility("read", "create") - displayName?: string; - - @doc("The URL of the Job Schedule.") - @visibility("read") - url?: string; - - @doc(""" -This is an opaque string. You can use it to detect whether the Job Schedule has -changed between requests. In particular, you can be pass the ETag with an -Update Job Schedule request to specify that your changes should take effect -only if nobody else has modified the schedule in the meantime. -""") - @visibility("read") - eTag?: string; - - @doc(""" -This is the last time at which the schedule level data, such as the Job -specification or recurrence information, changed. It does not factor in -job-level changes such as new Jobs being created or Jobs changing state. -""") - @visibility("read") - lastModified?: utcDateTime; - - @doc("The creation time of the Job Schedule.") - @visibility("read") - creationTime?: utcDateTime; - - @doc("The state of the Job Schedule.") - @visibility("read") - state?: JobScheduleState; - - @doc("The time at which the Job Schedule entered the current state.") - @visibility("read") - stateTransitionTime?: utcDateTime; - - @doc("This property is not present if the Job Schedule is in its initial active state.") - @visibility("read") - previousState?: JobScheduleState; - - @doc("This property is not present if the Job Schedule is in its initial active state.") - @visibility("read") - previousStateTransitionTime?: utcDateTime; - - @doc(""" -All times are fixed respective to UTC and are not impacted by daylight saving -time. 
-""") - schedule?: Schedule; - - @doc("Specifies details of the Jobs to be created on a schedule.") - jobSpecification?: JobSpecification; - - @doc(""" -Contains information about Jobs that have been and will be run under a Job -Schedule. -""") - @visibility("read") - executionInfo?: JobScheduleExecutionInformation; - - @doc(""" -The Batch service does not assign any meaning to metadata; it is solely for the -use of user code. -""") - metadata?: MetadataItem[]; - - @doc("Resource usage statistics for a Job Schedule.") - @visibility("read") - stats?: JobScheduleStatistics; -} - -@doc(""" -The schedule according to which Jobs will be created. All times are fixed -respective to UTC and are not impacted by daylight saving time. -""") -model Schedule { - @doc(""" -If you do not specify a doNotRunUntil time, the schedule becomes ready to -create Jobs immediately. -""") - doNotRunUntil?: utcDateTime; - - @doc(""" -If you do not specify a doNotRunAfter time, and you are creating a recurring -Job Schedule, the Job Schedule will remain active until you explicitly -terminate it. -""") - doNotRunAfter?: utcDateTime; - - @doc(""" -If a Job is not created within the startWindow interval, then the 'opportunity' -is lost; no Job will be created until the next recurrence of the schedule. If -the schedule is recurring, and the startWindow is longer than the recurrence -interval, then this is equivalent to an infinite startWindow, because the Job -that is 'due' in one recurrenceInterval is not carried forward into the next -recurrence interval. The default is infinite. The minimum value is 1 minute. If -you specify a lower value, the Batch service rejects the schedule with an -error; if you are calling the REST API directly, the HTTP status code is 400 -(Bad Request). -""") - startWindow?: duration; - - @doc(""" -Because a Job Schedule can have at most one active Job under it at any given -time, if it is time to create a new Job under a Job Schedule, but the previous -Job is still running, the Batch service will not create the new Job until the -previous Job finishes. If the previous Job does not finish within the -startWindow period of the new recurrenceInterval, then no new Job will be -scheduled for that interval. For recurring Jobs, you should normally specify a -jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you -will need an external process to monitor when Jobs are created, add Tasks to -the Jobs and terminate the Jobs ready for the next recurrence. The default is -that the schedule does not recur: one Job is created, within the startWindow -after the doNotRunUntil time, and the schedule is complete as soon as that Job -finishes. The minimum value is 1 minute. If you specify a lower value, the -Batch service rejects the schedule with an error; if you are calling the REST -API directly, the HTTP status code is 400 (Bad Request). -""") - recurrenceInterval?: duration; -} - -@doc("Specifies details of the Jobs to be created on a schedule.") -model JobSpecification { - @doc(""" -Priority values can range from -1000 to 1000, with -1000 being the lowest -priority and 1000 being the highest priority. The default value is 0. This -priority is used as the default for all Jobs under the Job Schedule. You can -update a Job's priority after it has been created using by using the update Job -API. -""") - priority?: int32; - - @doc(""" -If the value is set to True, other high priority jobs submitted to the system -will take precedence and will be able requeue tasks from this job. 
You can -update a job's allowTaskPreemption after it has been created using the update -job API. -""") - allowTaskPreemption?: boolean; - - @doc(""" -The value of maxParallelTasks must be -1 or greater than 0 if specified. If not -specified, the default value is -1, which means there's no limit to the number -of tasks that can be run at once. You can update a job's maxParallelTasks after -it has been created using the update job API. -""") - maxParallelTasks?: int32; - - @doc(""" -The name need not be unique and can contain any Unicode characters up to a -maximum length of 1024. -""") - displayName?: string; - - @doc(""" -Whether Tasks in the Job can define dependencies on each other. The default is -false. -""") - usesTaskDependencies?: boolean; - - @doc(""" -Note that if a Job contains no Tasks, then all Tasks are considered complete. -This option is therefore most commonly used with a Job Manager task; if you -want to use automatic Job termination without a Job Manager, you should -initially set onAllTasksComplete to noaction and update the Job properties to -set onAllTasksComplete to terminatejob once you have finished adding Tasks. The -default is noaction. -""") - onAllTasksComplete?: OnAllTasksComplete; - - @doc("The default is noaction.") - onTaskFailure?: OnTaskFailure; - - @doc("The network configuration for the Job.") - networkConfiguration?: JobNetworkConfiguration; - - @doc("The execution constraints for a Job.") - constraints?: JobConstraints; - - @doc(""" -If the Job does not specify a Job Manager Task, the user must explicitly add -Tasks to the Job using the Task API. If the Job does specify a Job Manager -Task, the Batch service creates the Job Manager Task when the Job is created, -and will try to schedule the Job Manager Task before scheduling other Tasks in -the Job. -""") - jobManagerTask?: JobManagerTask; - - @doc(""" -If a Job has a Job Preparation Task, the Batch service will run the Job -Preparation Task on a Node before starting any Tasks of that Job on that -Compute Node. -""") - jobPreparationTask?: JobPreparationTask; - - @doc(""" -The primary purpose of the Job Release Task is to undo changes to Nodes made by -the Job Preparation Task. Example activities include deleting local files, or -shutting down services that were started as part of Job preparation. A Job -Release Task cannot be specified without also specifying a Job Preparation Task -for the Job. The Batch service runs the Job Release Task on the Compute Nodes -that have run the Job Preparation Task. -""") - jobReleaseTask?: JobReleaseTask; - - @doc(""" -Individual Tasks can override an environment setting specified here by -specifying the same setting name with a different value. -""") - commonEnvironmentSettings?: EnvironmentSetting[]; - - @doc("Specifies how a Job should be assigned to a Pool.") - poolInfo: PoolInformation; - - @doc(""" -The Batch service does not assign any meaning to metadata; it is solely for the -use of user code. -""") - metadata?: MetadataItem[]; -} - -@doc("The network configuration for the Job.") -model JobNetworkConfiguration { - @doc(""" -The virtual network must be in the same region and subscription as the Azure -Batch Account. The specified subnet should have enough free IP addresses to -accommodate the number of Compute Nodes which will run Tasks from the Job. This -can be up to the number of Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' -service principal must have the 'Classic Virtual Machine Contributor' -Role-Based Access Control (RBAC) role for the specified VNet so that Azure -Batch service can schedule Tasks on the Nodes. This can be verified by checking -if the specified VNet has any associated Network Security Groups (NSG). If -communication to the Nodes in the specified subnet is denied by an NSG, then -the Batch service will set the state of the Compute Nodes to unusable. This is -of the form -/subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. -If the specified VNet has any associated Network Security Groups (NSG), then a -few reserved system ports must be enabled for inbound communication from the -Azure Batch service. For Pools created with a Virtual Machine configuration, -enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for -Windows. Port 443 is also required to be open for outbound connections for -communications to Azure Storage. For more details see: -https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration -""") - subnetId: string; -} - -@doc("The execution constraints for a Job.") -model JobConstraints { - @doc(""" -If the Job does not complete within the time limit, the Batch service -terminates it and any Tasks that are still running. In this case, the -termination reason will be MaxWallClockTimeExpiry. If this property is not -specified, there is no time limit on how long the Job may run. -""") - maxWallClockTime?: duration; - - @doc(""" -Note that this value specifically controls the number of retries. The Batch -service will try each Task once, and may then retry up to this limit. For -example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one -initial try and 3 retries). If the maximum retry count is 0, the Batch service -does not retry Tasks. If the maximum retry count is -1, the Batch service -retries the Task without limit, however this is not recommended for a start -task or any task. The default value is 0 (no retries) -""") - maxTaskRetryCount?: int32; -} - -@doc(""" -The Job Manager Task is automatically started when the Job is created. The -Batch service tries to schedule the Job Manager Task before any other Tasks in -the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where -Job Manager Tasks are running for as long as possible (that is, Compute Nodes -running 'normal' Tasks are removed before Compute Nodes running Job Manager -Tasks). When a Job Manager Task fails and needs to be restarted, the system -tries to schedule it at the highest priority. If there are no idle Compute -Nodes available, the system may terminate one of the running Tasks in the Pool -and return it to the queue in order to make room for the Job Manager Task to -restart. Note that a Job Manager Task in one Job does not have priority over -Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For -example, if a Job Manager in a priority 0 Job needs to be restarted, it will -not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery -operation is triggered on a Node. Examples of recovery operations include (but -are not limited to) when an unhealthy Node is rebooted or a Compute Node -disappeared due to host failure. Retries due to recovery operations are -independent of and are not counted against the maxTaskRetryCount. 
Even if the -maxTaskRetryCount is 0, an internal retry due to a recovery operation may -occur. Because of this, all Tasks should be idempotent. This means Tasks need -to tolerate being interrupted and restarted without causing any corruption or -duplicate data. The best practice for long running Tasks is to use some form of -checkpointing. -""") -model JobManagerTask { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores and cannot contain more than 64 characters. -""") - id: string; - - @doc(""" -It need not be unique and can contain any Unicode characters up to a maximum -length of 1024. -""") - displayName?: string; - - @doc(""" -The command line does not run under a shell, and therefore cannot take -advantage of shell features such as environment variable expansion. If you want -to take advantage of such features, you should invoke the shell in the command -line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c -MyCommand\" in Linux. If the command line refers to file paths, it should use a -relative path (relative to the Task working directory), or use the Batch -provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). -""") - commandLine: string; - - @doc(""" -If the Pool that will run this Task has containerConfiguration set, this must -be set as well. If the Pool that will run this Task doesn't have -containerConfiguration set, this must not be set. When this is specified, all -directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure -Batch directories on the node) are mapped into the container, all Task -environment variables are mapped into the container, and the Task command line -is executed in the container. Files produced in the container outside of -AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that -Batch file APIs will not be able to access those files. -""") - containerSettings?: TaskContainerSettings; - - @doc(""" -Files listed under this element are located in the Task's working directory. -There is a maximum size for the list of resource files. When the max size is -exceeded, the request will fail and the response error code will be -RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be -reduced in size. This can be achieved using .zip files, Application Packages, -or Docker Containers. -""") - resourceFiles?: ResourceFile[]; - - @doc(""" -For multi-instance Tasks, the files will only be uploaded from the Compute Node -on which the primary Task is executed. -""") - outputFiles?: OutputFile[]; - - @doc("A list of environment variable settings for the Job Manager Task.") - environmentSettings?: EnvironmentSetting[]; - - @doc("Execution constraints to apply to a Task.") - constraints?: TaskConstraints; - - @doc(""" -The default is 1. A Task can only be scheduled to run on a compute node if the -node has enough free scheduling slots available. For multi-instance Tasks, this -property is not supported and must not be specified. -""") - requiredSlots?: int32; - - @doc(""" -If true, when the Job Manager Task completes, the Batch service marks the Job -as complete. If any Tasks are still running at this time (other than Job -Release), those Tasks are terminated. If false, the completion of the Job -Manager Task does not affect the Job status. 
In this case, you should either -use the onAllTasksComplete attribute to terminate the Job, or have a client or -user terminate the Job explicitly. An example of this is if the Job Manager -creates a set of Tasks but then takes no further role in their execution. The -default value is true. If you are using the onAllTasksComplete and -onTaskFailure attributes to control Job lifetime, and using the Job Manager -Task only to create the Tasks for the Job (not to monitor progress), then it is -important to set killJobOnCompletion to false. -""") - killJobOnCompletion?: boolean; - - @doc("If omitted, the Task runs as a non-administrative user unique to the Task.") - userIdentity?: UserIdentity; - - @doc(""" -If true, no other Tasks will run on the same Node for as long as the Job -Manager is running. If false, other Tasks can run simultaneously with the Job -Manager on a Compute Node. The Job Manager Task counts normally against the -Compute Node's concurrent Task limit, so this is only relevant if the Compute -Node allows multiple concurrent Tasks. The default value is true. -""") - runExclusive?: boolean; - - @doc(""" -Application Packages are downloaded and deployed to a shared directory, not the -Task working directory. Therefore, if a referenced Application Package is -already on the Compute Node, and is up to date, then it is not re-downloaded; -the existing copy on the Compute Node is used. If a referenced Application -Package cannot be installed, for example because the package has been deleted -or because download failed, the Task fails. -""") - applicationPackageReferences?: ApplicationPackageReference[]; - - @doc(""" -If this property is set, the Batch service provides the Task with an -authentication token which can be used to authenticate Batch service operations -without requiring an Account access key. The token is provided via the -AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the -Task can carry out using the token depend on the settings. For example, a Task -can request Job permissions in order to add other Tasks to the Job, or check -the status of the Job or of other Tasks under the Job. -""") - authenticationTokenSettings?: AuthenticationTokenSettings; - - @doc("The default value is true.") - allowLowPriorityNode?: boolean; -} - -@doc("The container settings for a Task.") -model TaskContainerSettings { - @doc(""" -These additional options are supplied as arguments to the \"docker create\" -command, in addition to those controlled by the Batch Service. -""") - containerRunOptions?: string; - - @doc(""" -This is the full Image reference, as would be specified to \"docker pull\". If -no tag is provided as part of the Image name, the tag \":latest\" is used as a -default. -""") - imageName: string; - - @doc("This setting can be omitted if was already provided at Pool creation.") - registry?: ContainerRegistry; - - @doc("The default is 'taskWorkingDirectory'.") - workingDirectory?: ContainerWorkingDirectory; -} - -@doc("A private container registry.") -model ContainerRegistry { - @doc("The user name to log into the registry server.") - username?: string; - - @doc("The password to log into the registry server.") - password?: string; - - @doc("If omitted, the default is \"docker.io\".") - registryServer?: string; - - @doc(""" -The reference to a user assigned identity associated with the Batch pool which -a compute node will use. 
-""") - identityReference?: ComputeNodeIdentityReference; -} - -@doc(""" -The reference to a user assigned identity associated with the Batch pool which -a compute node will use. -""") -model ComputeNodeIdentityReference { - @doc("The ARM resource id of the user assigned identity.") - resourceId?: string; -} - -@doc("A single file or multiple files to be downloaded to a Compute Node.") -model ResourceFile { - @doc(""" -The autoStorageContainerName, storageContainerUrl and httpUrl properties are -mutually exclusive and one of them must be specified. -""") - autoStorageContainerName?: string; - - @doc(""" -The autoStorageContainerName, storageContainerUrl and httpUrl properties are -mutually exclusive and one of them must be specified. This URL must be readable -and listable from compute nodes. There are three ways to get such a URL for a -container in Azure storage: include a Shared Access Signature (SAS) granting -read and list permissions on the container, use a managed identity with read -and list permissions, or set the ACL for the container to allow public access. -""") - storageContainerUrl?: string; - - @doc(""" -The autoStorageContainerName, storageContainerUrl and httpUrl properties are -mutually exclusive and one of them must be specified. If the URL points to -Azure Blob Storage, it must be readable from compute nodes. There are three -ways to get such a URL for a blob in Azure storage: include a Shared Access -Signature (SAS) granting read permissions on the blob, use a managed identity -with read permission, or set the ACL for the blob or its container to allow -public access. -""") - httpUrl?: string; - - @doc(""" -The property is valid only when autoStorageContainerName or storageContainerUrl -is used. This prefix can be a partial filename or a subdirectory. If a prefix -is not specified, all the files in the container will be downloaded. -""") - blobPrefix?: string; - - @doc(""" -If the httpUrl property is specified, the filePath is required and describes -the path which the file will be downloaded to, including the filename. -Otherwise, if the autoStorageContainerName or storageContainerUrl property is -specified, filePath is optional and is the directory to download the files to. -In the case where filePath is used as a directory, any directory structure -already associated with the input data will be retained in full and appended to -the specified filePath directory. The specified relative path cannot break out -of the Task's working directory (for example by using '..'). -""") - filePath?: string; - - @doc(""" -This property applies only to files being downloaded to Linux Compute Nodes. It -will be ignored if it is specified for a resourceFile which will be downloaded -to a Windows Compute Node. If this property is not specified for a Linux -Compute Node, then a default value of 0770 is applied to the file. -""") - fileMode?: string; - - @doc(""" -The reference to a user assigned identity associated with the Batch pool which -a compute node will use. -""") - identityReference?: ComputeNodeIdentityReference; -} - -@doc(""" -On every file uploads, Batch service writes two log files to the compute node, -'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn -more about a specific failure. -""") -model OutputFile { - @doc(""" -Both relative and absolute paths are supported. Relative paths are relative to -the Task working directory. 
The following wildcards are supported: * matches 0 -or more characters (for example pattern abc* would match abc or abcdef), ** -matches any directory, ? matches any single character, [abc] matches one -character in the brackets, and [a-c] matches one character in the range. -Brackets can include a negation to match any character not specified (for -example [!abc] matches any character but a, b, or c). If a file name starts -with \".\" it is ignored by default but may be matched by specifying it -explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple -example: **\\*.txt matches any file that does not start in '.' and ends with -.txt in the Task working directory or any subdirectory. If the filename -contains a wildcard character it can be escaped using brackets (for example -abc[*] would match a file named abc*). Note that both \\ and / are treated as -directory separators on Windows, but only / is on Linux. Environment variables -(%var% on Windows or $var on Linux) are expanded prior to the pattern being -applied. -""") - filePattern: string; - - @doc("The destination to which a file should be uploaded.") - destination: OutputFileDestination; - - @doc(""" -Details about an output file upload operation, including under what conditions -to perform the upload. -""") - uploadOptions: OutputFileUploadOptions; -} - -@doc("The destination to which a file should be uploaded.") -model OutputFileDestination { - @doc("Specifies a file upload destination within an Azure blob storage container.") - container?: OutputFileBlobContainerDestination; -} - -@doc("Specifies a file upload destination within an Azure blob storage container.") -model OutputFileBlobContainerDestination { - @doc(""" -If filePattern refers to a specific file (i.e. contains no wildcards), then -path is the name of the blob to which to upload that file. If filePattern -contains one or more wildcards (and therefore may match multiple files), then -path is the name of the blob virtual directory (which is prepended to each blob -name) to which to upload the file(s). If omitted, file(s) are uploaded to the -root of the container with a blob name matching their file name. -""") - path?: string; - - @doc(""" -If not using a managed identity, the URL must include a Shared Access Signature -(SAS) granting write permissions to the container. -""") - containerUrl: string; - - @doc("The identity must have write access to the Azure Blob Storage container") - identityReference?: ComputeNodeIdentityReference; - - @doc(""" -These headers will be specified when uploading files to Azure Storage. Official -document on allowed headers when uploading blobs: -https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types -""") - uploadHeaders?: HttpHeader[]; -} - -@doc("An HTTP header name-value pair") -model HttpHeader { - @doc("The case-insensitive name of the header to be used while uploading output files") - name: string; - - @doc("The value of the header to be used while uploading output files") - value?: string; -} - -@doc(""" -Details about an output file upload operation, including under what conditions -to perform the upload. 
-""") -model OutputFileUploadOptions { - @doc("The default is taskcompletion.") - uploadCondition: OutputFileUploadCondition; -} - -@doc("An environment variable to be set on a Task process.") -model EnvironmentSetting { - @doc("The name of the environment variable.") - name: string; - - @doc("The value of the environment variable.") - value?: string; -} - -@doc("Execution constraints to apply to a Task.") -model TaskConstraints { - @doc("If this is not specified, there is no time limit on how long the Task may run.") - maxWallClockTime?: duration; - - @doc(""" -The default is 7 days, i.e. the Task directory will be retained for 7 days -unless the Compute Node is removed or the Job is deleted. -""") - retentionTime?: duration; - - @doc(""" -Note that this value specifically controls the number of retries for the Task -executable due to a nonzero exit code. The Batch service will try the Task -once, and may then retry up to this limit. For example, if the maximum retry -count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). -If the maximum retry count is 0, the Batch service does not retry the Task -after the first attempt. If the maximum retry count is -1, the Batch service -retries the Task without limit, however this is not recommended for a start -task or any task. The default value is 0 (no retries) -""") - maxTaskRetryCount?: int32; -} - -@doc("Specify either the userName or autoUser property, but not both.") -model UserIdentity { - @doc(""" -The userName and autoUser properties are mutually exclusive; you must specify -one but not both. -""") - username?: string; - - @doc(""" -The userName and autoUser properties are mutually exclusive; you must specify -one but not both. -""") - autoUser?: AutoUserSpecification; -} - -@doc(""" -Specifies the parameters for the auto user that runs a Task on the Batch -service. -""") -model AutoUserSpecification { - @doc(""" -The default value is pool. If the pool is running Windows a value of Task -should be specified if stricter isolation between tasks is required. For -example, if the task mutates the registry in a way which could impact other -tasks, or if certificates have been specified on the pool which should not be -accessible by normal tasks but should be accessible by StartTasks. -""") - scope?: AutoUserScope; - - @doc("The default value is nonAdmin.") - elevationLevel?: ElevationLevel; -} - -@doc("A reference to an Package to be deployed to Compute Nodes.") -model ApplicationPackageReference { - @doc(""" -When creating a pool, the package's application ID must be fully qualified -(/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). -""") - applicationId: string; - - @doc(""" -If this is omitted on a Pool, and no default version is specified for this -application, the request fails with the error code -InvalidApplicationPackageReferences and HTTP status code 409. If this is -omitted on a Task, and no default version is specified for this application, -the Task fails with a pre-processing error. -""") - version?: string; -} - -@doc(""" -The settings for an authentication token that the Task can use to perform Batch -service operations. -""") -model AuthenticationTokenSettings { - @doc(""" -The authentication token grants access to a limited set of Batch service -operations. 
Currently the only supported value for the access property is -'job', which grants access to all operations related to the Job which contains -the Task. -""") - access?: AccessScope[]; -} - -@doc(""" -You can use Job Preparation to prepare a Node to run Tasks for the Job. -Activities commonly performed in Job Preparation include: Downloading common -resource files used by all the Tasks in the Job. The Job Preparation Task can -download these common resource files to the shared location on the Node. -(AZ_BATCH_NODE_ROOT_DIR\\shared), or starting a local service on the Node so -that all Tasks of that Job can communicate with it. If the Job Preparation Task -fails (that is, exhausts its retry count before exiting with exit code 0), -Batch will not run Tasks of this Job on the Node. The Compute Node remains -ineligible to run Tasks of this Job until it is reimaged. The Compute Node -remains active and can be used for other Jobs. The Job Preparation Task can run -multiple times on the same Node. Therefore, you should write the Job -Preparation Task to handle re-execution. If the Node is rebooted, the Job -Preparation Task is run again on the Compute Node before scheduling any other -Task of the Job, if rerunOnNodeRebootAfterSuccess is true or if the Job -Preparation Task did not previously complete. If the Node is reimaged, the Job -Preparation Task is run again before scheduling any Task of the Job. Batch will -retry Tasks when a recovery operation is triggered on a Node. Examples of -recovery operations include (but are not limited to) when an unhealthy Node is -rebooted or a Compute Node disappeared due to host failure. Retries due to -recovery operations are independent of and are not counted against the -maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to -a recovery operation may occur. Because of this, all Tasks should be -idempotent. This means Tasks need to tolerate being interrupted and restarted -without causing any corruption or duplicate data. The best practice for long -running Tasks is to use some form of checkpointing. -""") -model JobPreparationTask { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores and cannot contain more than 64 characters. If you do not -specify this property, the Batch service assigns a default value of -'jobpreparation'. No other Task in the Job can have the same ID as the Job -Preparation Task. If you try to submit a Task with the same id, the Batch -service rejects the request with error code TaskIdSameAsJobPreparationTask; if -you are calling the REST API directly, the HTTP status code is 409 (Conflict). -""") - id?: string; - - @doc(""" -The command line does not run under a shell, and therefore cannot take -advantage of shell features such as environment variable expansion. If you want -to take advantage of such features, you should invoke the shell in the command -line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c -MyCommand\" in Linux. If the command line refers to file paths, it should use a -relative path (relative to the Task working directory), or use the Batch -provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). 
-""") - commandLine: string; - - @doc(""" -When this is specified, all directories recursively below the -AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are -mapped into the container, all Task environment variables are mapped into the -container, and the Task command line is executed in the container. Files -produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be -reflected to the host disk, meaning that Batch file APIs will not be able to -access those files. -""") - containerSettings?: TaskContainerSettings; - - @doc(""" -Files listed under this element are located in the Task's working directory. -There is a maximum size for the list of resource files. When the max size is -exceeded, the request will fail and the response error code will be -RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be -reduced in size. This can be achieved using .zip files, Application Packages, -or Docker Containers. -""") - resourceFiles?: ResourceFile[]; - - @doc("A list of environment variable settings for the Job Preparation Task.") - environmentSettings?: EnvironmentSetting[]; - - @doc("Execution constraints to apply to a Task.") - constraints?: TaskConstraints; - - @doc(""" -If true and the Job Preparation Task fails on a Node, the Batch service retries -the Job Preparation Task up to its maximum retry count (as specified in the -constraints element). If the Task has still not completed successfully after -all retries, then the Batch service will not schedule Tasks of the Job to the -Node. The Node remains active and eligible to run Tasks of other Jobs. If -false, the Batch service will not wait for the Job Preparation Task to -complete. In this case, other Tasks of the Job can start executing on the -Compute Node while the Job Preparation Task is still running; and even if the -Job Preparation Task fails, new Tasks will continue to be scheduled on the -Compute Node. The default value is true. -""") - waitForSuccess?: boolean; - - @doc(""" -If omitted, the Task runs as a non-administrative user unique to the Task on -Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux -Compute Nodes. -""") - userIdentity?: UserIdentity; - - @doc(""" -The Job Preparation Task is always rerun if a Compute Node is reimaged, or if -the Job Preparation Task did not complete (e.g. because the reboot occurred -while the Task was running). Therefore, you should always write a Job -Preparation Task to be idempotent and to behave correctly if run multiple -times. The default value is true. -""") - rerunOnNodeRebootAfterSuccess?: boolean; -} - -@doc(""" -The Job Release Task runs when the Job ends, because of one of the following: -The user calls the Terminate Job API, or the Delete Job API while the Job is -still active, the Job's maximum wall clock time constraint is reached, and the -Job is still active, or the Job's Job Manager Task completed, and the Job is -configured to terminate when the Job Manager completes. The Job Release Task -runs on each Node where Tasks of the Job have run and the Job Preparation Task -ran and completed. If you reimage a Node after it has run the Job Preparation -Task, and the Job ends without any further Tasks of the Job running on that -Node (and hence the Job Preparation Task does not re-run), then the Job Release -Task does not run on that Compute Node. If a Node reboots while the Job Release -Task is still running, the Job Release Task runs again when the Compute Node -starts up. 
The Job is not marked as complete until all Job Release Tasks have -completed. The Job Release Task runs in the background. It does not occupy a -scheduling slot; that is, it does not count towards the taskSlotsPerNode limit -specified on the Pool. -""") -model JobReleaseTask { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores and cannot contain more than 64 characters. If you do not -specify this property, the Batch service assigns a default value of -'jobrelease'. No other Task in the Job can have the same ID as the Job Release -Task. If you try to submit a Task with the same id, the Batch service rejects -the request with error code TaskIdSameAsJobReleaseTask; if you are calling the -REST API directly, the HTTP status code is 409 (Conflict). -""") - id?: string; - - @doc(""" -The command line does not run under a shell, and therefore cannot take -advantage of shell features such as environment variable expansion. If you want -to take advantage of such features, you should invoke the shell in the command -line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c -MyCommand\" in Linux. If the command line refers to file paths, it should use a -relative path (relative to the Task working directory), or use the Batch -provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). -""") - commandLine: string; - - @doc(""" -When this is specified, all directories recursively below the -AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are -mapped into the container, all Task environment variables are mapped into the -container, and the Task command line is executed in the container. Files -produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be -reflected to the host disk, meaning that Batch file APIs will not be able to -access those files. -""") - containerSettings?: TaskContainerSettings; - - @doc("Files listed under this element are located in the Task's working directory.") - resourceFiles?: ResourceFile[]; - - @doc("A list of environment variable settings for the Job Release Task.") - environmentSettings?: EnvironmentSetting[]; - - @doc(""" -The maximum elapsed time that the Job Release Task may run on a given Compute -Node, measured from the time the Task starts. If the Task does not complete -within the time limit, the Batch service terminates it. The default value is 15 -minutes. You may not specify a timeout longer than 15 minutes. If you do, the -Batch service rejects it with an error; if you are calling the REST API -directly, the HTTP status code is 400 (Bad Request). -""") - maxWallClockTime?: duration; - - @doc(""" -The default is 7 days, i.e. the Task directory will be retained for 7 days -unless the Compute Node is removed or the Job is deleted. -""") - retentionTime?: duration; - - @doc("If omitted, the Task runs as a non-administrative user unique to the Task.") - userIdentity?: UserIdentity; -} - -@doc("Specifies how a Job should be assigned to a Pool.") -model PoolInformation { - @doc(""" -You must ensure that the Pool referenced by this property exists. If the Pool -does not exist at the time the Batch service tries to schedule a Job, no Tasks -for the Job will run until you create a Pool with that id. Note that the Batch -service will not reject the Job request; it will simply not run Tasks until the -Pool exists. You must specify either the Pool ID or the auto Pool -specification, but not both. 
-""") - poolId?: string; - - @doc(""" -If auto Pool creation fails, the Batch service moves the Job to a completed -state, and the Pool creation error is set in the Job's scheduling error -property. The Batch service manages the lifetime (both creation and, unless -keepAlive is specified, deletion) of the auto Pool. Any user actions that -affect the lifetime of the auto Pool while the Job is active will result in -unexpected behavior. You must specify either the Pool ID or the auto Pool -specification, but not both. -""") - autoPoolSpecification?: AutoPoolSpecification; -} - -@doc(""" -Specifies characteristics for a temporary 'auto pool'. The Batch service will -create this auto Pool when the Job is submitted. -""") -model AutoPoolSpecification { - @doc(""" -The Batch service assigns each auto Pool a unique identifier on creation. To -distinguish between Pools created for different purposes, you can specify this -element to add a prefix to the ID that is assigned. The prefix can be up to 20 -characters long. -""") - autoPoolIdPrefix?: string; - - @doc(""" -The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule -are assigned to Pools. -""") - poolLifetimeOption: PoolLifetimeOption; - - @doc(""" -If false, the Batch service deletes the Pool once its lifetime (as determined -by the poolLifetimeOption setting) expires; that is, when the Job or Job -Schedule completes. If true, the Batch service does not delete the Pool -automatically. It is up to the user to delete auto Pools created with this -option. -""") - keepAlive?: boolean; - - @doc("Specification for creating a new Pool.") - pool?: PoolSpecification; -} - -@doc("Specification for creating a new Pool.") -model PoolSpecification { - @doc(""" -The display name need not be unique and can contain any Unicode characters up -to a maximum length of 1024. -""") - displayName?: string; - - @doc(""" -For information about available sizes of virtual machines in Pools, see Choose -a VM size for Compute Nodes in an Azure Batch Pool -(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). -""") - vmSize: string; - - @doc(""" -This property must be specified if the Pool needs to be created with Azure PaaS -VMs. This property and virtualMachineConfiguration are mutually exclusive and -one of the properties must be specified. If neither is specified then the Batch -service returns an error; if you are calling the REST API directly, the HTTP -status code is 400 (Bad Request). This property cannot be specified if the -Batch Account was created with its poolAllocationMode property set to -'UserSubscription'. -""") - cloudServiceConfiguration?: CloudServiceConfiguration; - - @doc(""" -This property must be specified if the Pool needs to be created with Azure IaaS -VMs. This property and cloudServiceConfiguration are mutually exclusive and one -of the properties must be specified. If neither is specified then the Batch -service returns an error; if you are calling the REST API directly, the HTTP -status code is 400 (Bad Request). -""") - virtualMachineConfiguration?: VirtualMachineConfiguration; - - @doc(""" -The default value is 1. The maximum value is the smaller of 4 times the number -of cores of the vmSize of the pool or 256. -""") - taskSlotsPerNode?: int32; - - @doc("If not specified, the default is spread.") - taskSchedulingPolicy?: TaskSchedulingPolicy; - - @doc(""" -This timeout applies only to manual scaling; it has no effect when -enableAutoScale is set to true. The default value is 15 minutes. 
The minimum -value is 5 minutes. If you specify a value less than 5 minutes, the Batch -service rejects the request with an error; if you are calling the REST API -directly, the HTTP status code is 400 (Bad Request). -""") - resizeTimeout?: duration; - - @doc(""" -This property must not be specified if enableAutoScale is set to true. If -enableAutoScale is set to false, then you must set either targetDedicatedNodes, -targetLowPriorityNodes, or both. -""") - targetDedicatedNodes?: int32; - - @doc(""" -This property must not be specified if enableAutoScale is set to true. If -enableAutoScale is set to false, then you must set either targetDedicatedNodes, -targetLowPriorityNodes, or both. -""") - targetLowPriorityNodes?: int32; - - @doc(""" -If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must -be specified. If true, the autoScaleFormula element is required. The Pool -automatically resizes according to the formula. The default value is false. -""") - enableAutoScale?: boolean; - - @doc(""" -This property must not be specified if enableAutoScale is set to false. It is -required if enableAutoScale is set to true. The formula is checked for validity -before the Pool is created. If the formula is not valid, the Batch service -rejects the request with detailed error information. -""") - autoScaleFormula?: string; - - @doc(""" -The default value is 15 minutes. The minimum and maximum value are 5 minutes -and 168 hours respectively. If you specify a value less than 5 minutes or -greater than 168 hours, the Batch service rejects the request with an invalid -property value error; if you are calling the REST API directly, the HTTP status -code is 400 (Bad Request). -""") - autoScaleEvaluationInterval?: duration; - - @doc(""" -Enabling inter-node communication limits the maximum size of the Pool due to -deployment restrictions on the Compute Nodes of the Pool. This may result in -the Pool not reaching its desired size. The default value is false. -""") - enableInterNodeCommunication?: boolean; - - @doc("The network configuration for a Pool.") - networkConfiguration?: NetworkConfiguration; - - @doc(""" -Batch will retry Tasks when a recovery operation is triggered on a Node. -Examples of recovery operations include (but are not limited to) when an -unhealthy Node is rebooted or a Compute Node disappeared due to host failure. -Retries due to recovery operations are independent of and are not counted -against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal -retry due to a recovery operation may occur. Because of this, all Tasks should -be idempotent. This means Tasks need to tolerate being interrupted and -restarted without causing any corruption or duplicate data. The best practice -for long running Tasks is to use some form of checkpointing. In some cases the -StartTask may be re-run even though the Compute Node was not rebooted. Special -care should be taken to avoid StartTasks which create breakaway process or -install/launch services from the StartTask working directory, as this will -block Batch from being able to re-run the StartTask. -""") - startTask?: StartTask; - - @doc(""" -For Windows Nodes, the Batch service installs the Certificates to the specified -Certificate store and location. For Linux Compute Nodes, the Certificates are -stored in a directory inside the Task working directory and an environment -variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this -location. 
For Certificates with visibility of 'remoteUser', a 'certs' directory -is created in the user's home directory (e.g., /home/{user-name}/certs) and -Certificates are placed in that directory. -""") - certificateReferences?: CertificateReference[]; - - @doc(""" -When creating a pool, the package's application ID must be fully qualified -(/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). -Changes to Package references affect all new Nodes joining the Pool, but do not -affect Compute Nodes that are already in the Pool until they are rebooted or -reimaged. There is a maximum of 10 Package references on any given Pool. -""") - applicationPackageReferences?: ApplicationPackageReference[]; - - @doc(""" -The list of application licenses must be a subset of available Batch service -application licenses. If a license is requested which is not supported, Pool -creation will fail. The permitted licenses available on the Pool are 'maya', -'vray', '3dsmax', 'arnold'. An additional charge applies for each application -license added to the Pool. -""") - applicationLicenses?: string[]; - - @doc("The list of user Accounts to be created on each Compute Node in the Pool.") - userAccounts?: UserAccount[]; - - @doc(""" -The Batch service does not assign any meaning to metadata; it is solely for the -use of user code. -""") - metadata?: MetadataItem[]; - - @doc("This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.") - mountConfiguration?: MountConfiguration[]; - - @doc("If omitted, the default value is Default.") - targetNodeCommunicationMode?: NodeCommunicationMode; -} - -@doc(""" -The configuration for Compute Nodes in a Pool based on the Azure Cloud Services -platform. -""") -model CloudServiceConfiguration { - @doc(""" -Possible values are: -2 - OS Family 2, equivalent to Windows Server 2008 R2 -SP1. -3 - OS Family 3, equivalent to Windows Server 2012. -4 - OS Family 4, -equivalent to Windows Server 2012 R2. -5 - OS Family 5, equivalent to Windows -Server 2016. -6 - OS Family 6, equivalent to Windows Server 2019. For more -information, see Azure Guest OS Releases -(https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). -""") - osFamily: string; - - @doc(""" -The default value is * which specifies the latest operating system version for -the specified OS family. -""") - osVersion?: string; -} - -@doc(""" -The configuration for Compute Nodes in a Pool based on the Azure Virtual -Machines infrastructure. -""") -model VirtualMachineConfiguration { - @doc(""" -A reference to an Azure Virtual Machines Marketplace Image or a Shared Image -Gallery Image. To get the list of all Azure Marketplace Image references -verified by Azure Batch, see the 'List Supported Images' operation. -""") - imageReference: ImageReference; - - @doc(""" -The Batch Compute Node agent is a program that runs on each Compute Node in the -Pool, and provides the command-and-control interface between the Compute Node -and the Batch service. There are different implementations of the Compute Node -agent, known as SKUs, for different operating systems. You must specify a -Compute Node agent SKU which matches the selected Image reference. To get the -list of supported Compute Node agent SKUs along with their list of verified -Image references, see the 'List supported Compute Node agent SKUs' operation. 
-""") - nodeAgentSKUId: string; - - @doc(""" -This property must not be specified if the imageReference property specifies a -Linux OS Image. -""") - windowsConfiguration?: WindowsConfiguration; - - @doc(""" -This property must be specified if the Compute Nodes in the Pool need to have -empty data disks attached to them. This cannot be updated. Each Compute Node -gets its own disk (the disk is not a file share). Existing disks cannot be -attached, each attached disk is empty. When the Compute Node is removed from -the Pool, the disk and all data associated with it is also deleted. The disk is -not formatted after being attached, it must be formatted before use - for more -information see -https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux -and -https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. -""") - dataDisks?: DataDisk[]; - - @doc(""" -This only applies to Images that contain the Windows operating system, and -should only be used when you hold valid on-premises licenses for the Compute -Nodes which will be deployed. If omitted, no on-premises licensing discount is -applied. Values are: - - Windows_Server - The on-premises license is for Windows -Server. - Windows_Client - The on-premises license is for Windows Client. - -""") - licenseType?: string; - - @doc(""" -If specified, setup is performed on each Compute Node in the Pool to allow -Tasks to run in containers. All regular Tasks and Job manager Tasks run on this -Pool must specify the containerSettings property, and all other Tasks may -specify it. -""") - containerConfiguration?: ContainerConfiguration; - - @doc(""" -If specified, encryption is performed on each node in the pool during node -provisioning. -""") - diskEncryptionConfiguration?: DiskEncryptionConfiguration; - - @doc(""" -This configuration will specify rules on how nodes in the pool will be -physically allocated. -""") - nodePlacementConfiguration?: NodePlacementConfiguration; - - @doc(""" -If specified, the extensions mentioned in this configuration will be installed -on each node. -""") - extensions?: VMExtension[]; - - @doc("Settings for the operating system disk of the compute node (VM).") - osDisk?: OSDisk; -} - -@doc("Windows operating system settings to apply to the virtual machine.") -model WindowsConfiguration { - @doc("If omitted, the default value is true.") - enableAutomaticUpdates?: boolean; -} - -@doc(""" -Settings which will be used by the data disks associated to Compute Nodes in -the Pool. When using attached data disks, you need to mount and format the -disks from within a VM to use them. -""") -model DataDisk { - @doc(""" -The lun is used to uniquely identify each data disk. If attaching multiple -disks, each should have a distinct lun. The value must be between 0 and 63, -inclusive. -""") - lun: int32; - - @doc(""" -The default value for caching is readwrite. For information about the caching -options see: -https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. 
-""") - caching?: CachingType; - - @doc("The initial disk size in gigabytes.") - diskSizeGB: int32; - - @doc("If omitted, the default is \"standard_lrs\".") - storageAccountType?: StorageAccountType; -} - -@doc("The configuration for container-enabled Pools.") -model ContainerConfiguration { - @doc("The container technology to be used.") - type: ContainerType; - - @doc(""" -This is the full Image reference, as would be specified to \"docker pull\". An -Image will be sourced from the default Docker registry unless the Image is -fully qualified with an alternative registry. -""") - containerImageNames?: string[]; - - @doc(""" -If any Images must be downloaded from a private registry which requires -credentials, then those credentials must be provided here. -""") - containerRegistries?: ContainerRegistry[]; -} - -@doc(""" -The disk encryption configuration applied on compute nodes in the pool. Disk -encryption configuration is not supported on Linux pool created with Shared -Image Gallery Image. -""") -model DiskEncryptionConfiguration { - @doc(""" -If omitted, no disks on the compute nodes in the pool will be encrypted. On -Linux pool, only \"TemporaryDisk\" is supported; on Windows pool, \"OsDisk\" -and \"TemporaryDisk\" must be specified. -""") - targets?: DiskEncryptionTarget[]; -} - -@doc(""" -For regional placement, nodes in the pool will be allocated in the same region. -For zonal placement, nodes in the pool will be spread across different zones -with best effort balancing. -""") -model NodePlacementConfiguration { - @doc(""" -Allocation policy used by Batch Service to provision the nodes. If not -specified, Batch will use the regional policy. -""") - policy?: NodePlacementPolicyType; -} - -@doc("The configuration for virtual machine extensions.") -model VMExtension { - @doc("The name of the virtual machine extension.") - name: string; - - @doc("The name of the extension handler publisher.") - publisher: string; - - @doc("The type of the extension.") - type: string; - - @doc("The version of script handler.") - typeHandlerVersion?: string; - - @doc(""" -Indicates whether the extension should use a newer minor version if one is -available at deployment time. Once deployed, however, the extension will not -upgrade minor versions unless redeployed, even with this property set to true. -""") - autoUpgradeMinorVersion?: boolean; - - @doc("JSON formatted public settings for the extension.") - settings?: object; - - @doc(""" -The extension can contain either protectedSettings or -protectedSettingsFromKeyVault or no protected settings at all. -""") - protectedSettings?: object; - - @doc(""" -Collection of extension names after which this extension needs to be -provisioned. -""") - provisionAfterExtensions?: string[]; -} - -@doc("Settings for the operating system disk of the compute node (VM).") -model OSDisk { - @doc(""" -Specifies the ephemeral Disk Settings for the operating system disk used by the -compute node (VM). -""") - ephemeralOSDiskSettings?: DiffDiskSettings; -} - -@doc(""" -Specifies the ephemeral Disk Settings for the operating system disk used by the -compute node (VM). -""") -model DiffDiskSettings { - @doc(""" -This property can be used by user in the request to choose the location e.g., -cache disk space for Ephemeral OS disk provisioning. 
For more information on -Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size -requirements for Windows VMs at -https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements -and Linux VMs at -https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. -""") - placement?: DiffDiskPlacement; -} - -@doc("Specifies how Tasks should be distributed across Compute Nodes.") -model TaskSchedulingPolicy { - @doc("If not specified, the default is spread.") - nodeFillType: ComputeNodeFillType; -} - -@doc("The network configuration for a Pool.") -model NetworkConfiguration { - @doc(""" -The virtual network must be in the same region and subscription as the Azure -Batch Account. The specified subnet should have enough free IP addresses to -accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have -enough free IP addresses, the Pool will partially allocate Nodes and a resize -error will occur. The 'MicrosoftAzureBatch' service principal must have the -'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for -the specified VNet. The specified subnet must allow communication from the -Azure Batch service to be able to schedule Tasks on the Nodes. This can be -verified by checking if the specified VNet has any associated Network Security -Groups (NSG). If communication to the Nodes in the specified subnet is denied -by an NSG, then the Batch service will set the state of the Compute Nodes to -unusable. For Pools created with virtualMachineConfiguration only ARM virtual -networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools -created with cloudServiceConfiguration both ARM and classic virtual networks -are supported. If the specified VNet has any associated Network Security Groups -(NSG), then a few reserved system ports must be enabled for inbound -communication. For Pools created with a virtual machine configuration, enable -ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. -For Pools created with a cloud service configuration, enable ports 10100, -20100, and 30100. Also enable outbound connections to Azure Storage on port -443. For more details see: -https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration -""") - subnetId?: string; - - @doc("The scope of dynamic vnet assignment.") - dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope; - - @doc(""" -Pool endpoint configuration is only supported on Pools with the -virtualMachineConfiguration property. -""") - endpointConfiguration?: PoolEndpointConfiguration; - - @doc(""" -Public IP configuration property is only supported on Pools with the -virtualMachineConfiguration property. -""") - publicIPAddressConfiguration?: PublicIPAddressConfiguration; -} - -@doc("The endpoint configuration for a Pool.") -model PoolEndpointConfiguration { - @doc(""" -The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum -number of inbound NAT Pools is exceeded the request fails with HTTP status code -400. This cannot be specified if the IPAddressProvisioningType is -NoPublicIPAddresses. -""") - inboundNATPools: InboundNATPool[]; -} - -@doc(""" -A inbound NAT Pool that can be used to address specific ports on Compute Nodes -in a Batch Pool externally. -""") -model InboundNATPool { - @doc(""" -The name must be unique within a Batch Pool, can contain letters, numbers, -underscores, periods, and hyphens. 
Names must start with a letter or number, -must end with a letter, number, or underscore, and cannot exceed 77 characters. - If any invalid values are provided the request fails with HTTP status code -400. -""") - name: string; - - @doc("The protocol of the endpoint.") - protocol: InboundEndpointProtocol; - - @doc(""" -This must be unique within a Batch Pool. Acceptable values are between 1 and -65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any -reserved values are provided the request fails with HTTP status code 400. -""") - backendPort: int32; - - @doc(""" -Acceptable values range between 1 and 65534 except ports from 50000 to 55000 -which are reserved. All ranges within a Pool must be distinct and cannot -overlap. Each range must contain at least 40 ports. If any reserved or -overlapping values are provided the request fails with HTTP status code 400. -""") - frontendPortRangeStart: int32; - - @doc(""" -Acceptable values range between 1 and 65534 except ports from 50000 to 55000 -which are reserved by the Batch service. All ranges within a Pool must be -distinct and cannot overlap. Each range must contain at least 40 ports. If any -reserved or overlapping values are provided the request fails with HTTP status -code 400. -""") - frontendPortRangeEnd: int32; - - @doc(""" -The maximum number of rules that can be specified across all the endpoints on a -Batch Pool is 25. If no network security group rules are specified, a default -rule will be created to allow inbound access to the specified backendPort. If -the maximum number of network security group rules is exceeded the request -fails with HTTP status code 400. -""") - networkSecurityGroupRules?: NetworkSecurityGroupRule[]; -} - -@doc("A network security group rule to apply to an inbound endpoint.") -model NetworkSecurityGroupRule { - @doc(""" -Priorities within a Pool must be unique and are evaluated in order of priority. -The lower the number the higher the priority. For example, rules could be -specified with order numbers of 150, 250, and 350. The rule with the order -number of 150 takes precedence over the rule that has an order of 250. Allowed -priorities are 150 to 4096. If any reserved or duplicate values are provided -the request fails with HTTP status code 400. -""") - priority: int32; - - @doc("The action that should be taken for a specified IP address, subnet range or tag.") - access: NetworkSecurityGroupRuleAccess; - - @doc(""" -Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. -192.168.1.0/24), default tag, or * (for all addresses). If any other values -are provided the request fails with HTTP status code 400. -""") - sourceAddressPrefix: string; - - @doc(""" -Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a -port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each -entry in this collection must not overlap any other entry (either a range or an -individual port). If any other values are provided the request fails with HTTP -status code 400. The default value is '*'. -""") - sourcePortRanges?: string[]; -} - -@doc("The public IP Address configuration of the networking configuration of a Pool.") -model PublicIPAddressConfiguration { - @doc("The default value is BatchManaged.") - provision?: IPAddressProvisioningType; - - @doc(""" -The number of IPs specified here limits the maximum size of the Pool - 100 -dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public -IP. 
For example, a pool needing 250 dedicated VMs would need at least 3 public -IPs specified. Each element of this collection is of the form: -/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. -""") - ipAddressIds?: string[]; -} - -@doc(""" -Batch will retry Tasks when a recovery operation is triggered on a Node. -Examples of recovery operations include (but are not limited to) when an -unhealthy Node is rebooted or a Compute Node disappeared due to host failure. -Retries due to recovery operations are independent of and are not counted -against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal -retry due to a recovery operation may occur. Because of this, all Tasks should -be idempotent. This means Tasks need to tolerate being interrupted and -restarted without causing any corruption or duplicate data. The best practice -for long running Tasks is to use some form of checkpointing. In some cases the -StartTask may be re-run even though the Compute Node was not rebooted. Special -care should be taken to avoid StartTasks which create breakaway process or -install/launch services from the StartTask working directory, as this will -block Batch from being able to re-run the StartTask. -""") -model StartTask { - @doc(""" -The command line does not run under a shell, and therefore cannot take -advantage of shell features such as environment variable expansion. If you want -to take advantage of such features, you should invoke the shell in the command -line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c -MyCommand\" in Linux. If the command line refers to file paths, it should use a -relative path (relative to the Task working directory), or use the Batch -provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). -""") - commandLine: string; - - @doc(""" -When this is specified, all directories recursively below the -AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are -mapped into the container, all Task environment variables are mapped into the -container, and the Task command line is executed in the container. Files -produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be -reflected to the host disk, meaning that Batch file APIs will not be able to -access those files. -""") - containerSettings?: TaskContainerSettings; - - @doc("Files listed under this element are located in the Task's working directory.") - resourceFiles?: ResourceFile[]; - - @doc("A list of environment variable settings for the StartTask.") - environmentSettings?: EnvironmentSetting[]; - - @doc("If omitted, the Task runs as a non-administrative user unique to the Task.") - userIdentity?: UserIdentity; - - @doc(""" -The Batch service retries a Task if its exit code is nonzero. Note that this -value specifically controls the number of retries. The Batch service will try -the Task once, and may then retry up to this limit. For example, if the maximum -retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 -retries). If the maximum retry count is 0, the Batch service does not retry the -Task. If the maximum retry count is -1, the Batch service retries the Task -without limit, however this is not recommended for a start task or any task. 
-The default value is 0 (no retries) -""") - maxTaskRetryCount?: int32; - - @doc(""" -If true and the StartTask fails on a Node, the Batch service retries the -StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has -still not completed successfully after all retries, then the Batch service -marks the Node unusable, and will not schedule Tasks to it. This condition can -be detected via the Compute Node state and failure info details. If false, the -Batch service will not wait for the StartTask to complete. In this case, other -Tasks can start executing on the Compute Node while the StartTask is still -running; and even if the StartTask fails, new Tasks will continue to be -scheduled on the Compute Node. The default is true. -""") - waitForSuccess?: boolean; -} - -@doc("A reference to a Certificate to be installed on Compute Nodes in a Pool.") -model CertificateReference { - @doc("The thumbprint of the Certificate.") - thumbprint: string; - - @doc("The algorithm with which the thumbprint is associated. This must be sha1.") - thumbprintAlgorithm: string; - - @doc(""" -The default value is currentuser. This property is applicable only for Pools -configured with Windows Compute Nodes (that is, created with -cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows -Image reference). For Linux Compute Nodes, the Certificates are stored in a -directory inside the Task working directory and an environment variable -AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. -For Certificates with visibility of 'remoteUser', a 'certs' directory is -created in the user's home directory (e.g., /home/{user-name}/certs) and -Certificates are placed in that directory. -""") - storeLocation?: CertificateStoreLocation; - - @doc(""" -This property is applicable only for Pools configured with Windows Compute -Nodes (that is, created with cloudServiceConfiguration, or with -virtualMachineConfiguration using a Windows Image reference). Common store -names include: My, Root, CA, Trust, Disallowed, TrustedPeople, -TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be -used. The default value is My. -""") - storeName?: string; - - @doc(""" -You can specify more than one visibility in this collection. The default is all -Accounts. -""") - visibility?: CertificateVisibility[]; -} - -@doc(""" -Properties used to create a user used to execute Tasks on an Azure Batch -Compute Node. -""") -model UserAccount { - @doc(""" -The name of the user Account. Names can contain any Unicode characters up to a -maximum length of 20. -""") - name: string; - - @doc("The password for the user Account.") - password: string; - - @doc("The default value is nonAdmin.") - elevationLevel?: ElevationLevel; - - @doc(""" -This property is ignored if specified on a Windows Pool. If not specified, the -user is created with the default options. -""") - linuxUserConfiguration?: LinuxUserConfiguration; - - @doc(""" -This property can only be specified if the user is on a Windows Pool. If not -specified and on a Windows Pool, the user is created with the default options. -""") - windowsUserConfiguration?: WindowsUserConfiguration; -} - -@doc("Properties used to create a user Account on a Linux Compute Node.") -model LinuxUserConfiguration { - @doc(""" -The uid and gid properties must be specified together or not at all. If not -specified the underlying operating system picks the uid. 
-""") - uid?: int32; - - @doc(""" -The uid and gid properties must be specified together or not at all. If not -specified the underlying operating system picks the gid. -""") - gid?: int32; - - @doc(""" -The private key must not be password protected. The private key is used to -automatically configure asymmetric-key based authentication for SSH between -Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication -property is true (it is ignored if enableInterNodeCommunication is false). It -does this by placing the key pair into the user's .ssh directory. If not -specified, password-less SSH is not configured between Compute Nodes (no -modification of the user's .ssh directory is done). -""") - sshPrivateKey?: string; -} - -@doc("Properties used to create a user Account on a Windows Compute Node.") -model WindowsUserConfiguration { - @doc(""" -The default value for VirtualMachineConfiguration Pools is 'batch' and for -CloudServiceConfiguration Pools is 'interactive'. -""") - loginMode?: LoginMode; -} - -@doc(""" -The Batch service does not assign any meaning to this metadata; it is solely -for the use of user code. -""") -model MetadataItem { - @doc("The name of the metadata item.") - name: string; - - @doc("The value of the metadata item.") - value: string; -} - -@doc("The file system to mount on each node.") -model MountConfiguration { - @doc("This property is mutually exclusive with all other properties.") - azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration; - - @doc("This property is mutually exclusive with all other properties.") - nfsMountConfiguration?: NFSMountConfiguration; - - @doc("This property is mutually exclusive with all other properties.") - cifsMountConfiguration?: CifsMountConfiguration; - - @doc("This property is mutually exclusive with all other properties.") - azureFileShareConfiguration?: AzureFileShareConfiguration; -} - -@doc("Information used to connect to an Azure Storage Container using Blobfuse.") -model AzureBlobFileSystemConfiguration { - @doc("The Azure Storage Account name.") - accountName: string; - - @doc("The Azure Blob Storage Container name.") - containerName: string; - - @doc(""" -This property is mutually exclusive with both sasKey and identity; exactly one -must be specified. -""") - accountKey?: string; - - @doc(""" -This property is mutually exclusive with both accountKey and identity; exactly -one must be specified. -""") - sasKey?: string; - - @doc("These are 'net use' options in Windows and 'mount' options in Linux.") - blobfuseOptions?: string; - - @doc(""" -All file systems are mounted relative to the Batch mounts directory, accessible -via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. -""") - relativeMountPath: string; - - @doc(""" -This property is mutually exclusive with both accountKey and sasKey; exactly -one must be specified. -""") - identityReference?: ComputeNodeIdentityReference; -} - -@doc("Information used to connect to an NFS file system.") -model NFSMountConfiguration { - @doc("The URI of the file system to mount.") - source: string; - - @doc(""" -All file systems are mounted relative to the Batch mounts directory, accessible -via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. 
-""") - relativeMountPath: string; - - @doc("These are 'net use' options in Windows and 'mount' options in Linux.") - mountOptions?: string; -} - -@doc("Information used to connect to a CIFS file system.") -model CifsMountConfiguration { - @doc("The user to use for authentication against the CIFS file system.") - username: string; - - @doc("The URI of the file system to mount.") - source: string; - - @doc(""" -All file systems are mounted relative to the Batch mounts directory, accessible -via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. -""") - relativeMountPath: string; - - @doc("These are 'net use' options in Windows and 'mount' options in Linux.") - mountOptions?: string; - - @doc("The password to use for authentication against the CIFS file system.") - password: string; -} - -@doc("Information used to connect to an Azure Fileshare.") -model AzureFileShareConfiguration { - @doc("The Azure Storage account name.") - accountName: string; - - @doc("This is of the form 'https://{account}.file.core.windows.net/'.") - azureFileUrl: string; - - @doc("The Azure Storage account key.") - accountKey: string; - - @doc(""" -All file systems are mounted relative to the Batch mounts directory, accessible -via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. -""") - relativeMountPath: string; - - @doc("These are 'net use' options in Windows and 'mount' options in Linux.") - mountOptions?: string; -} - -@doc(""" -Contains information about Jobs that have been and will be run under a Job -Schedule. -""") -model JobScheduleExecutionInformation { - @doc(""" -This property is meaningful only if the schedule is in the active state when -the time comes around. For example, if the schedule is disabled, no Job will be -created at nextRunTime unless the Job is enabled before then. -""") - nextRunTime?: utcDateTime; - - @doc(""" -This property is present only if the at least one Job has run under the -schedule. -""") - recentJob?: RecentJob; - - @doc("This property is set only if the Job Schedule is in the completed state.") - endTime?: utcDateTime; -} - -@doc("Information about the most recent Job to run under the Job Schedule.") -model RecentJob { - @doc("The ID of the Job.") - id?: string; - - @doc("The URL of the Job.") - url?: string; -} - -@doc("Resource usage statistics for a Job Schedule.") -model JobScheduleStatistics { - @doc("The URL of the statistics.") - url: string; - - @doc("The start time of the time range covered by the statistics.") - startTime: utcDateTime; - - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") - lastUpdateTime: utcDateTime; - - @doc(""" -The total user mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in all Jobs created under the schedule. -""") - userCPUTime: duration; - - @doc(""" -The total kernel mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in all Jobs created under the schedule. -""") - kernelCPUTime: duration; - - @doc(""" -The wall clock time is the elapsed time from when the Task started running on a -Compute Node to when it finished (or to the last time the statistics were -updated, if the Task had not finished by then). If a Task was retried, this -includes the wall clock time of all the Task retries. -""") - wallClockTime: duration; - - @doc(""" -The total number of disk read operations made by all Tasks in all Jobs created -under the schedule. 
-""") - readIOps: int32; - - @doc(""" -The total number of disk write operations made by all Tasks in all Jobs created -under the schedule. -""") - writeIOps: int32; - - @doc(""" -The total gibibytes read from disk by all Tasks in all Jobs created under the -schedule. -""") - readIOGiB: float32; - - @doc(""" -The total gibibytes written to disk by all Tasks in all Jobs created under the -schedule. -""") - writeIOGiB: float32; - - @doc(""" -The total number of Tasks successfully completed during the given time range in -Jobs created under the schedule. A Task completes successfully if it returns -exit code 0. -""") - numSucceededTasks: int32; - - @doc(""" -The total number of Tasks that failed during the given time range in Jobs -created under the schedule. A Task fails if it exhausts its maximum retry count -without returning exit code 0. -""") - numFailedTasks: int32; - - @doc(""" -The total number of retries during the given time range on all Tasks in all -Jobs created under the schedule. -""") - numTaskRetries: int32; - - @doc(""" -This value is only reported in the Account lifetime statistics; it is not -included in the Job statistics. -""") - waitTime: duration; -} - -@doc("The result of listing the Job Schedules in an Account.") -@pagedResult -model BatchJobScheduleListResult { - @doc("The list of Job Schedules.") - @items - value?: BatchJobSchedule[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("An Azure Batch Job.") -model BatchJob { - @doc(""" -The ID is case-preserving and case-insensitive (that is, you may not have two -IDs within an Account that differ only by case). -""") - @visibility("read", "create") - id?: string; - - @doc("The display name for the Job.") - @visibility("read", "create") - displayName?: string; - - @doc(""" -Whether Tasks in the Job can define dependencies on each other. The default is -false. -""") - @visibility("read", "create") - usesTaskDependencies?: boolean; - - @doc("The URL of the Job.") - @visibility("read") - url?: string; - - @doc(""" -This is an opaque string. You can use it to detect whether the Job has changed -between requests. In particular, you can be pass the ETag when updating a Job -to specify that your changes should take effect only if nobody else has -modified the Job in the meantime. -""") - @visibility("read") - eTag?: string; - - @doc(""" -This is the last time at which the Job level data, such as the Job state or -priority, changed. It does not factor in task-level changes such as adding new -Tasks or Tasks changing state. -""") - @visibility("read") - lastModified?: utcDateTime; - - @doc("The creation time of the Job.") - @visibility("read") - creationTime?: utcDateTime; - - @doc("The state of the Job.") - @visibility("read") - state?: JobState; - - @doc("The time at which the Job entered its current state.") - @visibility("read") - stateTransitionTime?: utcDateTime; - - @doc("This property is not set if the Job is in its initial Active state.") - @visibility("read") - previousState?: JobState; - - @doc("This property is not set if the Job is in its initial Active state.") - @visibility("read") - previousStateTransitionTime?: utcDateTime; - - @doc(""" -Priority values can range from -1000 to 1000, with -1000 being the lowest -priority and 1000 being the highest priority. The default value is 0. 
-""") - priority?: int32; - - @doc(""" -If the value is set to True, other high priority jobs submitted to the system -will take precedence and will be able requeue tasks from this job. You can -update a job's allowTaskPreemption after it has been created using the update -job API. -""") - allowTaskPreemption?: boolean; - - @doc(""" -The value of maxParallelTasks must be -1 or greater than 0 if specified. If not -specified, the default value is -1, which means there's no limit to the number -of tasks that can be run at once. You can update a job's maxParallelTasks after -it has been created using the update job API. -""") - maxParallelTasks?: int32 = -1; - - @doc("The execution constraints for a Job.") - constraints?: JobConstraints; - - @doc(""" -The Job Manager Task is automatically started when the Job is created. The -Batch service tries to schedule the Job Manager Task before any other Tasks in -the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where -Job Manager Tasks are running for as long as possible (that is, Compute Nodes -running 'normal' Tasks are removed before Compute Nodes running Job Manager -Tasks). When a Job Manager Task fails and needs to be restarted, the system -tries to schedule it at the highest priority. If there are no idle Compute -Nodes available, the system may terminate one of the running Tasks in the Pool -and return it to the queue in order to make room for the Job Manager Task to -restart. Note that a Job Manager Task in one Job does not have priority over -Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For -example, if a Job Manager in a priority 0 Job needs to be restarted, it will -not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery -operation is triggered on a Node. Examples of recovery operations include (but -are not limited to) when an unhealthy Node is rebooted or a Compute Node -disappeared due to host failure. Retries due to recovery operations are -independent of and are not counted against the maxTaskRetryCount. Even if the -maxTaskRetryCount is 0, an internal retry due to a recovery operation may -occur. Because of this, all Tasks should be idempotent. This means Tasks need -to tolerate being interrupted and restarted without causing any corruption or -duplicate data. The best practice for long running Tasks is to use some form of -checkpointing. -""") - @visibility("read", "create") - jobManagerTask?: JobManagerTask; - - @doc(""" -The Job Preparation Task is a special Task run on each Compute Node before any -other Task of the Job. -""") - @visibility("read", "create") - jobPreparationTask?: JobPreparationTask; - - @doc(""" -The Job Release Task is a special Task run at the end of the Job on each -Compute Node that has run any other Task of the Job. -""") - @visibility("read", "create") - jobReleaseTask?: JobReleaseTask; - - @doc(""" -Individual Tasks can override an environment setting specified here by -specifying the same setting name with a different value. -""") - @visibility("read", "create") - commonEnvironmentSettings?: EnvironmentSetting[]; - - @doc("Specifies how a Job should be assigned to a Pool.") - poolInfo?: PoolInformation; - - @doc("The default is noaction.") - onAllTasksComplete?: OnAllTasksComplete; - - @doc(""" -A Task is considered to have failed if has a failureInfo. 
A failureInfo is set -if the Task completes with a non-zero exit code after exhausting its retry -count, or if there was an error starting the Task, for example due to a -resource file download error. The default is noaction. -""") - @visibility("read", "create") - onTaskFailure?: OnTaskFailure; - - @doc("The network configuration for the Job.") - @visibility("read", "create") - networkConfiguration?: JobNetworkConfiguration; - - @doc(""" -The Batch service does not assign any meaning to metadata; it is solely for the -use of user code. -""") - metadata?: MetadataItem[]; - - @doc("Contains information about the execution of a Job in the Azure Batch service.") - @visibility("read") - executionInfo?: JobExecutionInformation; - - @doc(""" -This property is populated only if the CloudJob was retrieved with an expand -clause including the 'stats' attribute; otherwise it is null. The statistics -may not be immediately available. The Batch service performs periodic roll-up -of statistics. The typical delay is about 30 minutes. -""") - @visibility("read") - stats?: JobStatistics; -} - -@doc("Contains information about the execution of a Job in the Azure Batch service.") -model JobExecutionInformation { - @doc("This is the time at which the Job was created.") - startTime: utcDateTime; - - @doc("This property is set only if the Job is in the completed state.") - endTime?: utcDateTime; - - @doc(""" -This element contains the actual Pool where the Job is assigned. When you get -Job details from the service, they also contain a poolInfo element, which -contains the Pool configuration data from when the Job was added or updated. -That poolInfo element may also contain a poolId element. If it does, the two -IDs are the same. If it does not, it means the Job ran on an auto Pool, and -this property contains the ID of that auto Pool. -""") - poolId?: string; - - @doc("This property is not set if there was no error starting the Job.") - schedulingError?: JobSchedulingError; - - @doc(""" -This property is set only if the Job is in the completed state. If the Batch -service terminates the Job, it sets the reason as follows: JMComplete - the Job -Manager Task completed, and killJobOnCompletion was set to true. -MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. -TerminateJobSchedule - the Job ran as part of a schedule, and the schedule -terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to -terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's -onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in -the Job failed with an exit condition that specified a jobAction of -terminatejob. Any other string is a user-defined reason specified in a call to -the 'Terminate a Job' operation. -""") - terminateReason?: string; -} - -@doc("An error encountered by the Batch service when scheduling a Job.") -model JobSchedulingError { - @doc("The category of the error.") - category: ErrorCategory; - - @doc(""" -An identifier for the Job scheduling error. Codes are invariant and are -intended to be consumed programmatically. -""") - code?: string; - - @doc(""" -A message describing the Job scheduling error, intended to be suitable for -display in a user interface. 
-""") - message?: string; - - @doc("A list of additional error details related to the scheduling error.") - details?: NameValuePair[]; -} - -@doc("Options when disabling a Job.") -model BatchJobDisableParameters { - @doc("What to do with active Tasks associated with the Job.") - disableTasks: DisableJobOption; -} - -@doc("Options when terminating a Job.") -model BatchJobTerminateParameters { - @doc(""" -The text you want to appear as the Job's TerminateReason. The default is -'UserTerminate'. -""") - terminateReason?: string; -} - -@doc("The result of listing the Jobs in an Account.") -@pagedResult -model BatchJobListResult { - @doc("The list of Jobs.") - @items - value?: BatchJob[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc(""" -The result of listing the status of the Job Preparation and Job Release Tasks -for a Job. -""") -@pagedResult -model BatchJobListPreparationAndReleaseTaskStatusResult { - @doc("A list of Job Preparation and Job Release Task execution information.") - @items - value?: JobPreparationAndReleaseTaskExecutionInformation[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("The status of the Job Preparation and Job Release Tasks on a Compute Node.") -model JobPreparationAndReleaseTaskExecutionInformation { - @doc("The ID of the Pool containing the Compute Node to which this entry refers.") - poolId?: string; - - @doc("The ID of the Compute Node to which this entry refers.") - nodeId?: string; - - @doc("The URL of the Compute Node to which this entry refers.") - nodeUrl?: string; - - @doc(""" -Contains information about the execution of a Job Preparation Task on a Compute -Node. -""") - jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformation; - - @doc("This property is set only if the Job Release Task has run on the Compute Node.") - jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformation; -} - -@doc(""" -Contains information about the execution of a Job Preparation Task on a Compute -Node. -""") -model JobPreparationTaskExecutionInformation { - @doc(""" -If the Task has been restarted or retried, this is the most recent time at -which the Task started running. -""") - startTime: utcDateTime; - - @doc("This property is set only if the Task is in the Completed state.") - endTime?: utcDateTime; - - @doc("The current state of the Job Preparation Task on the Compute Node.") - state: JobPreparationTaskState; - - @doc(""" -The root directory of the Job Preparation Task on the Compute Node. You can use -this path to retrieve files created by the Task, such as log files. -""") - taskRootDirectory?: string; - - @doc("The URL to the root directory of the Job Preparation Task on the Compute Node.") - taskRootDirectoryUrl?: string; - - @doc(""" -This parameter is returned only if the Task is in the completed state. The exit -code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. Note that the exit code may also be generated by the -Compute Node operating system, such as when a process is forcibly terminated. 
-""") - exitCode?: int32; - - @doc("This property is set only if the Task runs in a container context.") - containerInfo?: TaskContainerExecutionInformation; - - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") - failureInfo?: TaskFailureInformation; - - @doc(""" -Task application failures (non-zero exit code) are retried, pre-processing -errors (the Task could not be run) and file upload errors are not retried. The -Batch service will retry the Task up to the limit specified by the constraints. -""") - retryCount: int32; - - @doc(""" -This property is set only if the Task was retried (i.e. retryCount is nonzero). -If present, this is typically the same as startTime, but may be different if -the Task has been restarted for reasons other than retry; for example, if the -Compute Node was rebooted during a retry, then the startTime is updated but the -lastRetryTime is not. -""") - lastRetryTime?: utcDateTime; - - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") - result?: TaskExecutionResult; -} - -@doc("Contains information about the container which a Task is executing.") -model TaskContainerExecutionInformation { - @doc("The ID of the container.") - containerId?: string; - - @doc(""" -This is the state of the container according to the Docker service. It is -equivalent to the status field returned by \"docker inspect\". -""") - state?: string; - - @doc(""" -This is the detailed error string from the Docker service, if available. It is -equivalent to the error field returned by \"docker inspect\". -""") - error?: string; -} - -@doc("Information about a Task failure.") -model TaskFailureInformation { - @doc("The category of the error.") - category: ErrorCategory; - - @doc(""" -An identifier for the Task error. Codes are invariant and are intended to be -consumed programmatically. -""") - code?: string; - - @doc(""" -A message describing the Task error, intended to be suitable for display in a -user interface. -""") - message?: string; - - @doc("A list of additional details related to the error.") - details?: NameValuePair[]; -} - -@doc(""" -Contains information about the execution of a Job Release Task on a Compute -Node. -""") -model JobReleaseTaskExecutionInformation { - @doc(""" -If the Task has been restarted or retried, this is the most recent time at -which the Task started running. -""") - startTime: utcDateTime; - - @doc("This property is set only if the Task is in the Completed state.") - endTime?: utcDateTime; - - @doc("The current state of the Job Release Task on the Compute Node.") - state: JobReleaseTaskState; - - @doc(""" -The root directory of the Job Release Task on the Compute Node. You can use -this path to retrieve files created by the Task, such as log files. -""") - taskRootDirectory?: string; - - @doc("The URL to the root directory of the Job Release Task on the Compute Node.") - taskRootDirectoryUrl?: string; - - @doc(""" -This parameter is returned only if the Task is in the completed state. The exit -code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. Note that the exit code may also be generated by the -Compute Node operating system, such as when a process is forcibly terminated. 
-""") - exitCode?: int32; - - @doc("This property is set only if the Task runs in a container context.") - containerInfo?: TaskContainerExecutionInformation; - - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") - failureInfo?: TaskFailureInformation; - - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") - result?: TaskExecutionResult; -} - -@doc("The Task and TaskSlot counts for a Job.") -model TaskCountsResult { - @doc("The Task counts for a Job.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - taskCounts: TaskCounts; - - @doc("The TaskSlot counts for a Job.") - taskSlotCounts: TaskSlotCounts; -} - -@doc("The Task counts for a Job.") -model TaskCounts { - @doc("The number of Tasks in the active state.") - active: int32; - - @doc("The number of Tasks in the running or preparing state.") - running: int32; - - @doc("The number of Tasks in the completed state.") - completed: int32; - - @doc(""" -The number of Tasks which succeeded. A Task succeeds if its result (found in -the executionInfo property) is 'success'. -""") - succeeded: int32; - - @doc(""" -The number of Tasks which failed. A Task fails if its result (found in the -executionInfo property) is 'failure'. -""") - failed: int32; -} - -@doc("The TaskSlot counts for a Job.") -model TaskSlotCounts { - @doc("The number of TaskSlots for active Tasks.") - active: int32; - - @doc("The number of TaskSlots for running Tasks.") - running: int32; - - @doc("The number of TaskSlots for completed Tasks.") - completed: int32; - - @doc("The number of TaskSlots for succeeded Tasks.") - succeeded: int32; - - @doc("The number of TaskSlots for failed Tasks.") - failed: int32; -} - -@doc("A Pool in the Azure Batch service.") -model BatchPool { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores, and cannot contain more than 64 characters. The ID is -case-preserving and case-insensitive (that is, you may not have two IDs within -an Account that differ only by case). -""") - @visibility("read", "create") - id?: string; - - @doc(""" -The display name need not be unique and can contain any Unicode characters up -to a maximum length of 1024. -""") - @visibility("read", "create") - displayName?: string; - - @doc("The URL of the Pool.") - @visibility("read") - url?: string; - - @doc(""" -This is an opaque string. You can use it to detect whether the Pool has changed -between requests. In particular, you can be pass the ETag when updating a Pool -to specify that your changes should take effect only if nobody else has -modified the Pool in the meantime. -""") - @visibility("read") - eTag?: string; - - @doc(""" -This is the last time at which the Pool level data, such as the -targetDedicatedNodes or enableAutoscale settings, changed. It does not factor -in node-level changes such as a Compute Node changing state. 
-""") - @visibility("read") - lastModified?: utcDateTime; - - @doc("The creation time of the Pool.") - @visibility("read") - creationTime?: utcDateTime; - - @doc("The current state of the Pool.") - @visibility("read") - state?: PoolState; - - @doc("The time at which the Pool entered its current state.") - @visibility("read") - stateTransitionTime?: utcDateTime; - - @doc("Whether the Pool is resizing.") - @visibility("read") - allocationState?: AllocationState; - - @doc("The time at which the Pool entered its current allocation state.") - @visibility("read") - allocationStateTransitionTime?: utcDateTime; - - @doc(""" -For information about available sizes of virtual machines in Pools, see Choose -a VM size for Compute Nodes in an Azure Batch Pool -(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). -""") - @visibility("read", "create") - vmSize?: string; - - @doc(""" -This property and virtualMachineConfiguration are mutually exclusive and one of -the properties must be specified. This property cannot be specified if the -Batch Account was created with its poolAllocationMode property set to -'UserSubscription'. -""") - @visibility("read", "create") - cloudServiceConfiguration?: CloudServiceConfiguration; - - @doc(""" -This property and cloudServiceConfiguration are mutually exclusive and one of -the properties must be specified. -""") - @visibility("read", "create") - virtualMachineConfiguration?: VirtualMachineConfiguration; - - @doc(""" -This is the timeout for the most recent resize operation. (The initial sizing -when the Pool is created counts as a resize.) The default value is 15 minutes. -""") - @visibility("read", "create") - resizeTimeout?: duration; - - @doc(""" -This property is set only if one or more errors occurred during the last Pool -resize, and only when the Pool allocationState is Steady. -""") - @visibility("read") - resizeErrors?: ResizeError[]; - - @doc("The number of dedicated Compute Nodes currently in the Pool.") - @visibility("read") - currentDedicatedNodes?: int32; - - @doc(""" -Spot/Low-priority Compute Nodes which have been preempted are included in this -count. -""") - @visibility("read") - currentLowPriorityNodes?: int32; - - @doc("The desired number of dedicated Compute Nodes in the Pool.") - @visibility("read", "create") - targetDedicatedNodes?: int32; - - @doc("The desired number of Spot/Low-priority Compute Nodes in the Pool.") - @visibility("read", "create") - targetLowPriorityNodes?: int32; - - @doc(""" -If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must -be specified. If true, the autoScaleFormula property is required and the Pool -automatically resizes according to the formula. The default value is false. -""") - @visibility("read", "create") - enableAutoScale?: boolean; - - @doc(""" -This property is set only if the Pool automatically scales, i.e. -enableAutoScale is true. -""") - @visibility("read", "create") - autoScaleFormula?: string; - - @doc(""" -This property is set only if the Pool automatically scales, i.e. -enableAutoScale is true. -""") - @visibility("read", "create") - autoScaleEvaluationInterval?: duration; - - @doc(""" -This property is set only if the Pool automatically scales, i.e. -enableAutoScale is true. -""") - @visibility("read") - autoScaleRun?: AutoScaleRun; - - @doc(""" -This imposes restrictions on which Compute Nodes can be assigned to the Pool. -Specifying this value can reduce the chance of the requested number of Compute -Nodes to be allocated in the Pool. 
-""") - @visibility("read", "create") - enableInterNodeCommunication?: boolean; - - @doc("The network configuration for a Pool.") - @visibility("read", "create") - networkConfiguration?: NetworkConfiguration; - - @doc(""" -Batch will retry Tasks when a recovery operation is triggered on a Node. -Examples of recovery operations include (but are not limited to) when an -unhealthy Node is rebooted or a Compute Node disappeared due to host failure. -Retries due to recovery operations are independent of and are not counted -against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal -retry due to a recovery operation may occur. Because of this, all Tasks should -be idempotent. This means Tasks need to tolerate being interrupted and -restarted without causing any corruption or duplicate data. The best practice -for long running Tasks is to use some form of checkpointing. In some cases the -StartTask may be re-run even though the Compute Node was not rebooted. Special -care should be taken to avoid StartTasks which create breakaway process or -install/launch services from the StartTask working directory, as this will -block Batch from being able to re-run the StartTask. -""") - startTask?: StartTask; - - @doc(""" -For Windows Nodes, the Batch service installs the Certificates to the specified -Certificate store and location. For Linux Compute Nodes, the Certificates are -stored in a directory inside the Task working directory and an environment -variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this -location. For Certificates with visibility of 'remoteUser', a 'certs' directory -is created in the user's home directory (e.g., /home/{user-name}/certs) and -Certificates are placed in that directory. -""") - certificateReferences?: CertificateReference[]; - - @doc(""" -Changes to Package references affect all new Nodes joining the Pool, but do not -affect Compute Nodes that are already in the Pool until they are rebooted or -reimaged. There is a maximum of 10 Package references on any given Pool. -""") - applicationPackageReferences?: ApplicationPackageReference[]; - - @doc(""" -The list of application licenses must be a subset of available Batch service -application licenses. If a license is requested which is not supported, Pool -creation will fail. -""") - @visibility("read", "create") - applicationLicenses?: string[]; - - @doc(""" -The default value is 1. The maximum value is the smaller of 4 times the number -of cores of the vmSize of the pool or 256. -""") - @visibility("read", "create") - taskSlotsPerNode?: int32; - - @doc("If not specified, the default is spread.") - @visibility("read", "create") - taskSchedulingPolicy?: TaskSchedulingPolicy; - - @doc("The list of user Accounts to be created on each Compute Node in the Pool.") - @visibility("read", "create") - userAccounts?: UserAccount[]; - - @doc("A list of name-value pairs associated with the Pool as metadata.") - metadata?: MetadataItem[]; - - @doc(""" -This property is populated only if the CloudPool was retrieved with an expand -clause including the 'stats' attribute; otherwise it is null. The statistics -may not be immediately available. The Batch service performs periodic roll-up -of statistics. The typical delay is about 30 minutes. 
-""") - @visibility("read") - stats?: PoolStatistics; - - @doc("This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.") - @visibility("read", "create") - mountConfiguration?: MountConfiguration[]; - - @doc(""" -The list of user identities associated with the Batch pool. The user identity -dictionary key references will be ARM resource ids in the form: -'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. -""") - @visibility("read") - identity?: BatchPoolIdentity; - - @doc("If omitted, the default value is Default.") - targetNodeCommunicationMode?: NodeCommunicationMode; - - @doc("Determines how a pool communicates with the Batch service.") - @visibility("read") - currentNodeCommunicationMode?: NodeCommunicationMode; -} - -@doc("An error that occurred when resizing a Pool.") -model ResizeError { - @doc(""" -An identifier for the Pool resize error. Codes are invariant and are intended -to be consumed programmatically. -""") - code?: string; - - @doc(""" -A message describing the Pool resize error, intended to be suitable for display -in a user interface. -""") - message?: string; - - @doc("A list of additional error details related to the Pool resize error.") - values?: NameValuePair[]; -} - -@doc("The results and errors from an execution of a Pool autoscale formula.") -model AutoScaleRun { - @doc("The time at which the autoscale formula was last evaluated.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - timestamp: utcDateTime; - - @doc(""" -Each variable value is returned in the form $variable=value, and variables are -separated by semicolons. -""") - results?: string; - - @doc("An error that occurred when executing or evaluating a Pool autoscale formula.") - error?: AutoScaleRunError; -} - -@doc("An error that occurred when executing or evaluating a Pool autoscale formula.") -model AutoScaleRunError { - @doc(""" -An identifier for the autoscale error. Codes are invariant and are intended to -be consumed programmatically. -""") - code?: string; - - @doc(""" -A message describing the autoscale error, intended to be suitable for display -in a user interface. -""") - message?: string; - - @doc("A list of additional error details related to the autoscale error.") - values?: NameValuePair[]; -} - -@doc("The identity of the Batch pool, if configured.") -model BatchPoolIdentity { - @doc(""" -The list of user identities associated with the Batch pool. The user identity -dictionary key references will be ARM resource ids in the form: -'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. -""") - type: PoolIdentityType; - - @doc(""" -The user identity dictionary key references will be ARM resource ids in the -form: -'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
-""") - userAssignedIdentities?: UserAssignedIdentity[]; -} - -@doc("The user assigned Identity") -model UserAssignedIdentity { - @doc("The ARM resource id of the user assigned identity") - resourceId: string; - - @doc("The client id of the user assigned identity.") - @visibility("read") - clientId?: string; - - @doc("The principal id of the user assigned identity.") - @visibility("read") - principalId?: string; -} - -@doc("The result of listing the Pools in an Account.") -@pagedResult -model BatchPoolListResult { - @doc("The list of Pools.") - @items - value?: BatchPool[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("Options for enabling automatic scaling on a Pool.") -model BatchPoolEnableAutoScaleParameters { - @doc(""" -The formula is checked for validity before it is applied to the Pool. If the -formula is not valid, the Batch service rejects the request with detailed error -information. For more information about specifying this formula, see -Automatically scale Compute Nodes in an Azure Batch Pool -(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). -""") - autoScaleFormula?: string; - - @doc(""" -The default value is 15 minutes. The minimum and maximum value are 5 minutes -and 168 hours respectively. If you specify a value less than 5 minutes or -greater than 168 hours, the Batch service rejects the request with an invalid -property value error; if you are calling the REST API directly, the HTTP status -code is 400 (Bad Request). If you specify a new interval, then the existing -autoscale evaluation schedule will be stopped and a new autoscale evaluation -schedule will be started, with its starting time being the time when this -request was issued. -""") - autoScaleEvaluationInterval?: duration; -} - -@doc("Options for evaluating an automatic scaling formula on a Pool.") -model BatchPoolEvaluateAutoScaleParameters { - @doc(""" -The formula is validated and its results calculated, but it is not applied to -the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a -Pool'. For more information about specifying this formula, see Automatically -scale Compute Nodes in an Azure Batch Pool -(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). -""") - autoScaleFormula: string; -} - -@doc("Options for changing the size of a Pool.") -model BatchPoolResizeParameters { - @doc("The desired number of dedicated Compute Nodes in the Pool.") - targetDedicatedNodes?: int32; - - @doc("The desired number of Spot/Low-priority Compute Nodes in the Pool.") - targetLowPriorityNodes?: int32; - - @doc(""" -The default value is 15 minutes. The minimum value is 5 minutes. If you specify -a value less than 5 minutes, the Batch service returns an error; if you are -calling the REST API directly, the HTTP status code is 400 (Bad Request). -""") - resizeTimeout?: duration; - - @doc("The default value is requeue.") - nodeDeallocationOption?: ComputeNodeDeallocationOption; -} - -@doc("Options for removing Compute Nodes from a Pool.") -model NodeRemoveParameters { - @doc("A maximum of 100 nodes may be removed per request.") - nodeList: string[]; - - @doc(""" -The default value is 15 minutes. The minimum value is 5 minutes. If you specify -a value less than 5 minutes, the Batch service returns an error; if you are -calling the REST API directly, the HTTP status code is 400 (Bad Request). 
-""") - resizeTimeout?: duration; - - @doc("The default value is requeue.") - nodeDeallocationOption?: ComputeNodeDeallocationOption; -} - -@doc(""" -Batch will retry Tasks when a recovery operation is triggered on a Node. -Examples of recovery operations include (but are not limited to) when an -unhealthy Node is rebooted or a Compute Node disappeared due to host failure. -Retries due to recovery operations are independent of and are not counted -against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal -retry due to a recovery operation may occur. Because of this, all Tasks should -be idempotent. This means Tasks need to tolerate being interrupted and -restarted without causing any corruption or duplicate data. The best practice -for long running Tasks is to use some form of checkpointing. -""") -model BatchTask { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores, and cannot contain more than 64 characters. -""") - id?: string; - - @doc(""" -The display name need not be unique and can contain any Unicode characters up -to a maximum length of 1024. -""") - displayName?: string; - - @doc("The URL of the Task.") - @visibility("read") - url?: string; - - @doc(""" -This is an opaque string. You can use it to detect whether the Task has changed -between requests. In particular, you can be pass the ETag when updating a Task -to specify that your changes should take effect only if nobody else has -modified the Task in the meantime. -""") - @visibility("read") - eTag?: string; - - @doc("The last modified time of the Task.") - @visibility("read") - lastModified?: utcDateTime; - - @doc("The creation time of the Task.") - @visibility("read") - creationTime?: utcDateTime; - - @doc("How the Batch service should respond when the Task completes.") - exitConditions?: ExitConditions; - - @doc("The state of the Task.") - @visibility("read") - state?: TaskState; - - @doc("The time at which the Task entered its current state.") - @visibility("read") - stateTransitionTime?: utcDateTime; - - @doc("This property is not set if the Task is in its initial Active state.") - @visibility("read") - previousState?: TaskState; - - @doc("This property is not set if the Task is in its initial Active state.") - @visibility("read") - previousStateTransitionTime?: utcDateTime; - - @doc(""" -For multi-instance Tasks, the command line is executed as the primary Task, -after the primary Task and all subtasks have finished executing the -coordination command line. The command line does not run under a shell, and -therefore cannot take advantage of shell features such as environment variable -expansion. If you want to take advantage of such features, you should invoke -the shell in the command line, for example using \"cmd /c MyCommand\" in -Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to -file paths, it should use a relative path (relative to the Task working -directory), or use the Batch provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). -""") - commandLine?: string; - - @doc(""" -If the Pool that will run this Task has containerConfiguration set, this must -be set as well. If the Pool that will run this Task doesn't have -containerConfiguration set, this must not be set. 
When this is specified, all -directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure -Batch directories on the node) are mapped into the container, all Task -environment variables are mapped into the container, and the Task command line -is executed in the container. Files produced in the container outside of -AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that -Batch file APIs will not be able to access those files. -""") - containerSettings?: TaskContainerSettings; - - @doc(""" -For multi-instance Tasks, the resource files will only be downloaded to the -Compute Node on which the primary Task is executed. There is a maximum size for -the list of resource files. When the max size is exceeded, the request will -fail and the response error code will be RequestEntityTooLarge. If this occurs, -the collection of ResourceFiles must be reduced in size. This can be achieved -using .zip files, Application Packages, or Docker Containers. -""") - resourceFiles?: ResourceFile[]; - - @doc(""" -For multi-instance Tasks, the files will only be uploaded from the Compute Node -on which the primary Task is executed. -""") - outputFiles?: OutputFile[]; - - @doc("A list of environment variable settings for the Task.") - environmentSettings?: EnvironmentSetting[]; - - @doc(""" -A locality hint that can be used by the Batch service to select a Compute Node -on which to start a Task. -""") - affinityInfo?: AffinityInformation; - - @doc("Execution constraints to apply to a Task.") - constraints?: TaskConstraints; - - @doc(""" -The default is 1. A Task can only be scheduled to run on a compute node if the -node has enough free scheduling slots available. For multi-instance Tasks, this -must be 1. -""") - requiredSlots?: int32; - - @doc("If omitted, the Task runs as a non-administrative user unique to the Task.") - userIdentity?: UserIdentity; - - @doc("Information about the execution of a Task.") - @visibility("read") - executionInfo?: TaskExecutionInformation; - - @doc("Information about the Compute Node on which a Task ran.") - @visibility("read") - nodeInfo?: ComputeNodeInformation; - - @doc(""" -Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, -if any of the subtasks fail (for example due to exiting with a non-zero exit -code) the entire multi-instance Task fails. The multi-instance Task is then -terminated and retried, up to its retry limit. -""") - multiInstanceSettings?: MultiInstanceSettings; - - @doc("Resource usage statistics for a Task.") - @visibility("read") - stats?: TaskStatistics; - - @doc(""" -This Task will not be scheduled until all Tasks that it depends on have -completed successfully. If any of those Tasks fail and exhaust their retry -counts, this Task will never be scheduled. -""") - dependsOn?: TaskDependencies; - - @doc(""" -Application packages are downloaded and deployed to a shared directory, not the -Task working directory. Therefore, if a referenced package is already on the -Node, and is up to date, then it is not re-downloaded; the existing copy on the -Compute Node is used. If a referenced Package cannot be installed, for example -because the package has been deleted or because download failed, the Task -fails. -""") - applicationPackageReferences?: ApplicationPackageReference[]; - - @doc(""" -If this property is set, the Batch service provides the Task with an -authentication token which can be used to authenticate Batch service operations -without requiring an Account access key. 
The token is provided via the -AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the -Task can carry out using the token depend on the settings. For example, a Task -can request Job permissions in order to add other Tasks to the Job, or check -the status of the Job or of other Tasks under the Job. -""") - authenticationTokenSettings?: AuthenticationTokenSettings; -} - -@doc("Specifies how the Batch service should respond when the Task completes.") -model ExitConditions { - @doc(""" -A list of individual Task exit codes and how the Batch service should respond -to them. -""") - exitCodes?: ExitCodeMapping[]; - - @doc(""" -A list of Task exit code ranges and how the Batch service should respond to -them. -""") - exitCodeRanges?: ExitCodeRangeMapping[]; - - @doc("Specifies how the Batch service responds to a particular exit condition.") - preProcessingError?: ExitOptions; - - @doc(""" -If the Task exited with an exit code that was specified via exitCodes or -exitCodeRanges, and then encountered a file upload error, then the action -specified by the exit code takes precedence. -""") - fileUploadError?: ExitOptions; - - @doc(""" -This value is used if the Task exits with any nonzero exit code not listed in -the exitCodes or exitCodeRanges collection, with a pre-processing error if the -preProcessingError property is not present, or with a file upload error if the -fileUploadError property is not present. If you want non-default behavior on -exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges -collection. -""") - default?: ExitOptions; -} - -@doc(""" -How the Batch service should respond if a Task exits with a particular exit -code. -""") -model ExitCodeMapping { - @doc("A process exit code.") - code: int32; - - @doc("Specifies how the Batch service responds to a particular exit condition.") - exitOptions: ExitOptions; -} - -@doc("Specifies how the Batch service responds to a particular exit condition.") -model ExitOptions { - @doc(""" -The default is none for exit code 0 and terminate for all other exit -conditions. If the Job's onTaskFailed property is noaction, then specifying -this property returns an error and the add Task request fails with an invalid -property value error; if you are calling the REST API directly, the HTTP status -code is 400 (Bad Request). -""") - jobAction?: JobAction; - - @doc(""" -Possible values are 'satisfy' (allowing dependent tasks to progress) and -'block' (dependent tasks continue to wait). Batch does not yet support -cancellation of dependent tasks. -""") - dependencyAction?: DependencyAction; -} - -@doc(""" -A range of exit codes and how the Batch service should respond to exit codes -within that range. -""") -model ExitCodeRangeMapping { - @doc("The first exit code in the range.") - start: int32; - - @doc("The last exit code in the range.") - end: int32; - - @doc("Specifies how the Batch service responds to a particular exit condition.") - exitOptions: ExitOptions; -} - -@doc(""" -A locality hint that can be used by the Batch service to select a Compute Node -on which to start a Task. -""") -model AffinityInformation { - @doc(""" -You can pass the affinityId of a Node to indicate that this Task needs to run -on that Compute Node. Note that this is just a soft affinity. If the target -Compute Node is busy or unavailable at the time the Task is scheduled, then the -Task will be scheduled elsewhere. 
-""") - affinityId: string; -} - -@doc("Information about the execution of a Task.") -model TaskExecutionInformation { - @doc(""" -'Running' corresponds to the running state, so if the Task specifies resource -files or Packages, then the start time reflects the time at which the Task -started downloading or deploying these. If the Task has been restarted or -retried, this is the most recent time at which the Task started running. This -property is present only for Tasks that are in the running or completed state. -""") - startTime?: utcDateTime; - - @doc("This property is set only if the Task is in the Completed state.") - endTime?: utcDateTime; - - @doc(""" -This property is set only if the Task is in the completed state. In general, -the exit code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. However, if the Batch service terminates the Task (due -to timeout, or user termination via the API) you may see an operating -system-defined exit code. -""") - exitCode?: int32; - - @doc("This property is set only if the Task runs in a container context.") - containerInfo?: TaskContainerExecutionInformation; - - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") - failureInfo?: TaskFailureInformation; - - @doc(""" -Task application failures (non-zero exit code) are retried, pre-processing -errors (the Task could not be run) and file upload errors are not retried. The -Batch service will retry the Task up to the limit specified by the constraints. -""") - retryCount: int32; - - @doc(""" -This element is present only if the Task was retried (i.e. retryCount is -nonzero). If present, this is typically the same as startTime, but may be -different if the Task has been restarted for reasons other than retry; for -example, if the Compute Node was rebooted during a retry, then the startTime is -updated but the lastRetryTime is not. -""") - lastRetryTime?: utcDateTime; - - @doc(""" -When the user removes Compute Nodes from a Pool (by resizing/shrinking the -pool) or when the Job is being disabled, the user can specify that running -Tasks on the Compute Nodes be requeued for execution. This count tracks how -many times the Task has been requeued for these reasons. -""") - requeueCount: int32; - - @doc("This property is set only if the requeueCount is nonzero.") - lastRequeueTime?: utcDateTime; - - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") - result?: TaskExecutionResult; -} - -@doc("Information about the Compute Node on which a Task ran.") -model ComputeNodeInformation { - @doc(""" -An identifier for the Node on which the Task ran, which can be passed when -adding a Task to request that the Task be scheduled on this Compute Node. -""") - affinityId?: string; - - @doc("The URL of the Compute Node on which the Task ran. ") - nodeUrl?: string; - - @doc("The ID of the Pool on which the Task ran.") - poolId?: string; - - @doc("The ID of the Compute Node on which the Task ran.") - nodeId?: string; - - @doc("The root directory of the Task on the Compute Node.") - taskRootDirectory?: string; - - @doc("The URL to the root directory of the Task on the Compute Node.") - taskRootDirectoryUrl?: string; -} - -@doc(""" -Multi-instance Tasks are commonly used to support MPI Tasks. 
In the MPI case, -if any of the subtasks fail (for example due to exiting with a non-zero exit -code) the entire multi-instance Task fails. The multi-instance Task is then -terminated and retried, up to its retry limit. -""") -model MultiInstanceSettings { - @doc("If omitted, the default is 1.") - numberOfInstances?: int32; - - @doc(""" -A typical coordination command line launches a background service and verifies -that the service is ready to process inter-node messages. -""") - coordinationCommandLine: string; - - @doc(""" -The difference between common resource files and Task resource files is that -common resource files are downloaded for all subtasks including the primary, -whereas Task resource files are downloaded only for the primary. Also note that -these resource files are not downloaded to the Task working directory, but -instead are downloaded to the Task root directory (one directory above the -working directory). There is a maximum size for the list of resource files. -When the max size is exceeded, the request will fail and the response error -code will be RequestEntityTooLarge. If this occurs, the collection of -ResourceFiles must be reduced in size. This can be achieved using .zip files, -Application Packages, or Docker Containers. -""") - commonResourceFiles?: ResourceFile[]; -} - -@doc("Resource usage statistics for a Task.") -model TaskStatistics { - @doc("The URL of the statistics.") - url: string; - - @doc("The start time of the time range covered by the statistics.") - startTime: utcDateTime; - - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") - lastUpdateTime: utcDateTime; - - @doc(""" -The total user mode CPU time (summed across all cores and all Compute Nodes) -consumed by the Task. -""") - userCPUTime: duration; - - @doc(""" -The total kernel mode CPU time (summed across all cores and all Compute Nodes) -consumed by the Task. -""") - kernelCPUTime: duration; - - @doc(""" -The wall clock time is the elapsed time from when the Task started running on a -Compute Node to when it finished (or to the last time the statistics were -updated, if the Task had not finished by then). If the Task was retried, this -includes the wall clock time of all the Task retries. -""") - wallClockTime: duration; - - @doc("The total number of disk read operations made by the Task.") - readIOps: int32; - - @doc("The total number of disk write operations made by the Task.") - writeIOps: int32; - - @doc("The total gibibytes read from disk by the Task.") - readIOGiB: float32; - - @doc("The total gibibytes written to disk by the Task.") - writeIOGiB: float32; - - @doc(""" -The total wait time of the Task. The wait time for a Task is defined as the -elapsed time between the creation of the Task and the start of Task execution. -(If the Task is retried due to failures, the wait time is the time to the most -recent Task execution.) -""") - waitTime: duration; -} - -@doc(""" -Specifies any dependencies of a Task. Any Task that is explicitly specified or -within a dependency range must complete before the dependant Task will be -scheduled. -""") -model TaskDependencies { - @doc(""" -The taskIds collection is limited to 64000 characters total (i.e. the combined -length of all Task IDs). If the taskIds collection exceeds the maximum length, -the Add Task request fails with error code TaskDependencyListTooLong. In this -case consider using Task ID ranges instead. 
-""") - taskIds?: string[]; - - @doc(""" -The list of Task ID ranges that this Task depends on. All Tasks in all ranges -must complete successfully before the dependent Task can be scheduled. -""") - taskIdRanges?: TaskIdRange[]; -} - -@doc(""" -The start and end of the range are inclusive. For example, if a range has start -9 and end 12, then it represents Tasks '9', '10', '11' and '12'. -""") -model TaskIdRange { - @doc("The first Task ID in the range.") - start: int32; - - @doc("The last Task ID in the range.") - end: int32; -} - -@doc("The result of listing the Tasks in a Job.") -@pagedResult -model BatchTaskListResult { - @doc("The list of Tasks.") - @items - value?: BatchTask[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("A collection of Azure Batch Tasks to add.") -model BatchTaskCollection { - @doc(""" -The total serialized size of this collection must be less than 1MB. If it is -greater than 1MB (for example if each Task has 100's of resource files or -environment variables), the request will fail with code 'RequestBodyTooLarge' -and should be retried again with fewer Tasks. -""") - value: BatchTask[]; -} - -@doc("The result of adding a collection of Tasks to a Job.") -model TaskAddCollectionResult { - @doc("The results of the add Task collection operation.") - value?: TaskAddResult[]; -} - -@doc("Result for a single Task added as part of an add Task collection operation.") -model TaskAddResult { - @doc("The status of the add Task request.") - status: TaskAddStatus; - - @doc("The ID of the Task for which this is the result.") - taskId: string; - - @doc(""" -You can use this to detect whether the Task has changed between requests. In -particular, you can be pass the ETag with an Update Task request to specify -that your changes should take effect only if nobody else has modified the Job -in the meantime. -""") - eTag?: string; - - @doc("The last modified time of the Task.") - lastModified?: utcDateTime; - - @doc("The URL of the Task, if the Task was successfully added.") - location?: string; - - @doc("An error response received from the Azure Batch service.") - error?: BatchError; -} - -@doc("The result of listing the subtasks of a Task.") -model BatchTaskListSubtasksResult { - @doc("The list of subtasks.") - value?: SubtaskInformation[]; -} - -@doc("Information about an Azure Batch subtask.") -model SubtaskInformation { - @doc("The ID of the subtask.") - id?: int32; - - @doc("Information about the Compute Node on which a Task ran.") - nodeInfo?: ComputeNodeInformation; - - @doc(""" -The time at which the subtask started running. If the subtask has been -restarted or retried, this is the most recent time at which the subtask started -running. -""") - startTime?: utcDateTime; - - @doc("This property is set only if the subtask is in the Completed state.") - endTime?: utcDateTime; - - @doc(""" -This property is set only if the subtask is in the completed state. In general, -the exit code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. However, if the Batch service terminates the subtask -(due to timeout, or user termination via the API) you may see an operating -system-defined exit code. 
-""") - exitCode?: int32; - - @doc("This property is set only if the Task runs in a container context.") - containerInfo?: TaskContainerExecutionInformation; - - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") - failureInfo?: TaskFailureInformation; - - @doc("The state of the subtask.") - state?: SubtaskState; - - @doc("The time at which the subtask entered its current state.") - stateTransitionTime?: utcDateTime; - - @doc("This property is not set if the subtask is in its initial running state.") - previousState?: SubtaskState; - - @doc("This property is not set if the subtask is in its initial running state.") - previousStateTransitionTime?: utcDateTime; - - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") - result?: TaskExecutionResult; -} - -@doc("A user Account for RDP or SSH access on a Compute Node.") -model ComputeNodeUser { - @doc("The user name of the Account.") - name: string; - - @doc("The default value is false.") - isAdmin?: boolean; - - @doc(""" -If omitted, the default is 1 day from the current time. For Linux Compute -Nodes, the expiryTime has a precision up to a day. -""") - expiryTime?: utcDateTime; - - @doc(""" -The password is required for Windows Compute Nodes (those created with -'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' -using a Windows Image reference). For Linux Compute Nodes, the password can -optionally be specified along with the sshPublicKey property. -""") - password?: string; - - @doc(""" -The public key should be compatible with OpenSSH encoding and should be base 64 -encoded. This property can be specified only for Linux Compute Nodes. If this -is specified for a Windows Compute Node, then the Batch service rejects the -request; if you are calling the REST API directly, the HTTP status code is 400 -(Bad Request). -""") - sshPublicKey?: string; -} - -@doc("The set of changes to be made to a user Account on a Compute Node.") -model NodeUpdateUserParameters { - @doc(""" -The password is required for Windows Compute Nodes (those created with -'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' -using a Windows Image reference). For Linux Compute Nodes, the password can -optionally be specified along with the sshPublicKey property. If omitted, any -existing password is removed. -""") - password?: string; - - @doc(""" -If omitted, the default is 1 day from the current time. For Linux Compute -Nodes, the expiryTime has a precision up to a day. -""") - expiryTime?: utcDateTime; - - @doc(""" -The public key should be compatible with OpenSSH encoding and should be base 64 -encoded. This property can be specified only for Linux Compute Nodes. If this -is specified for a Windows Compute Node, then the Batch service rejects the -request; if you are calling the REST API directly, the HTTP status code is 400 -(Bad Request). If omitted, any existing SSH public key is removed. -""") - sshPublicKey?: string; -} - -@doc("A Compute Node in the Batch service.") -model ComputeNode { - @doc(""" -Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a -Compute Node is removed from a Pool, all of its local files are deleted, and -the ID is reclaimed and could be reused for new Compute Nodes. -""") - id?: string; - - @doc("The URL of the Compute Node.") - url?: string; - - @doc(""" -The Spot/Low-priority Compute Node has been preempted. 
Tasks which were running -on the Compute Node when it was preempted will be rescheduled when another -Compute Node becomes available. -""") - state?: ComputeNodeState; - - @doc("Whether the Compute Node is available for Task scheduling.") - schedulingState?: SchedulingState; - - @doc("The time at which the Compute Node entered its current state.") - stateTransitionTime?: utcDateTime; - - @doc("This property may not be present if the Compute Node state is unusable.") - lastBootTime?: utcDateTime; - - @doc(""" -This is the time when the Compute Node was initially allocated and doesn't -change once set. It is not updated when the Compute Node is service healed or -preempted. -""") - allocationTime?: utcDateTime; - - @doc(""" -Every Compute Node that is added to a Pool is assigned a unique IP address. -Whenever a Compute Node is removed from a Pool, all of its local files are -deleted, and the IP address is reclaimed and could be reused for new Compute -Nodes. -""") - ipAddress?: string; - - @doc(""" -Note that this is just a soft affinity. If the target Compute Node is busy or -unavailable at the time the Task is scheduled, then the Task will be scheduled -elsewhere. -""") - affinityId?: string; - - @doc(""" -For information about available sizes of virtual machines in Pools, see Choose -a VM size for Compute Nodes in an Azure Batch Pool -(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). -""") - vmSize?: string; - - @doc(""" -The total number of Job Tasks completed on the Compute Node. This includes Job -Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start -Tasks. -""") - totalTasksRun?: int32; - - @doc(""" -The total number of currently running Job Tasks on the Compute Node. This -includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job -Release or Start Tasks. -""") - runningTasksCount?: int32; - - @doc(""" -The total number of scheduling slots used by currently running Job Tasks on the -Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job -Preparation, Job Release or Start Tasks. -""") - runningTaskSlotsCount?: int32; - - @doc(""" -The total number of Job Tasks which completed successfully (with exitCode 0) on -the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job -Preparation, Job Release or Start Tasks. -""") - totalTasksSucceeded?: int32; - - @doc(""" -This property is present only if at least one Task has run on this Compute Node -since it was assigned to the Pool. -""") - recentTasks?: TaskInformation[]; - - @doc(""" -Batch will retry Tasks when a recovery operation is triggered on a Node. -Examples of recovery operations include (but are not limited to) when an -unhealthy Node is rebooted or a Compute Node disappeared due to host failure. -Retries due to recovery operations are independent of and are not counted -against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal -retry due to a recovery operation may occur. Because of this, all Tasks should -be idempotent. This means Tasks need to tolerate being interrupted and -restarted without causing any corruption or duplicate data. The best practice -for long running Tasks is to use some form of checkpointing. In some cases the -StartTask may be re-run even though the Compute Node was not rebooted. Special -care should be taken to avoid StartTasks which create breakaway process or -install/launch services from the StartTask working directory, as this will -block Batch from being able to re-run the StartTask. 
-""") - startTask?: StartTask; - - @doc("Information about a StartTask running on a Compute Node.") - startTaskInfo?: StartTaskInformation; - - @doc(""" -For Windows Nodes, the Batch service installs the Certificates to the specified -Certificate store and location. For Linux Compute Nodes, the Certificates are -stored in a directory inside the Task working directory and an environment -variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this -location. For Certificates with visibility of 'remoteUser', a 'certs' directory -is created in the user's home directory (e.g., /home/{user-name}/certs) and -Certificates are placed in that directory. -""") - certificateReferences?: CertificateReference[]; - - @doc("The list of errors that are currently being encountered by the Compute Node.") - errors?: ComputeNodeError[]; - - @doc(""" -Whether this Compute Node is a dedicated Compute Node. If false, the Compute -Node is a Spot/Low-priority Compute Node. -""") - isDedicated?: boolean; - - @doc("The endpoint configuration for the Compute Node.") - endpointConfiguration?: ComputeNodeEndpointConfiguration; - - @doc(""" -The Batch Compute Node agent is a program that runs on each Compute Node in the -Pool and provides Batch capability on the Compute Node. -""") - nodeAgentInfo?: NodeAgentInformation; - - @doc("Info about the current state of the virtual machine.") - virtualMachineInfo?: VirtualMachineInfo; -} - -@doc("Information about a Task running on a Compute Node.") -model TaskInformation { - @doc("The URL of the Task.") - taskUrl?: string; - - @doc("The ID of the Job to which the Task belongs.") - jobId?: string; - - @doc("The ID of the Task.") - taskId?: string; - - @doc("The ID of the subtask if the Task is a multi-instance Task.") - subtaskId?: int32; - - @doc("The state of the Task.") - taskState: TaskState; - - @doc("Information about the execution of a Task.") - executionInfo?: TaskExecutionInformation; -} - -@doc("Information about a StartTask running on a Compute Node.") -model StartTaskInformation { - @doc("The state of the StartTask on the Compute Node.") - state: StartTaskState; - - @doc(""" -This value is reset every time the Task is restarted or retried (that is, this -is the most recent time at which the StartTask started running). -""") - startTime: utcDateTime; - - @doc(""" -This is the end time of the most recent run of the StartTask, if that run has -completed (even if that run failed and a retry is pending). This element is not -present if the StartTask is currently running. -""") - endTime?: utcDateTime; - - @doc(""" -This property is set only if the StartTask is in the completed state. In -general, the exit code for a process reflects the specific convention -implemented by the application developer for that process. If you use the exit -code value to make decisions in your code, be sure that you know the exit code -convention used by the application process. However, if the Batch service -terminates the StartTask (due to timeout, or user termination via the API) you -may see an operating system-defined exit code. -""") - exitCode?: int32; - - @doc("This property is set only if the Task runs in a container context.") - containerInfo?: TaskContainerExecutionInformation; - - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. 
-""") - failureInfo?: TaskFailureInformation; - - @doc(""" -Task application failures (non-zero exit code) are retried, pre-processing -errors (the Task could not be run) and file upload errors are not retried. The -Batch service will retry the Task up to the limit specified by the constraints. -""") - retryCount: int32; - - @doc(""" -This element is present only if the Task was retried (i.e. retryCount is -nonzero). If present, this is typically the same as startTime, but may be -different if the Task has been restarted for reasons other than retry; for -example, if the Compute Node was rebooted during a retry, then the startTime is -updated but the lastRetryTime is not. -""") - lastRetryTime?: utcDateTime; - - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") - result?: TaskExecutionResult; -} - -@doc("An error encountered by a Compute Node.") -model ComputeNodeError { - @doc(""" -An identifier for the Compute Node error. Codes are invariant and are intended -to be consumed programmatically. -""") - code?: string; - - @doc(""" -A message describing the Compute Node error, intended to be suitable for -display in a user interface. -""") - message?: string; - - @doc("The list of additional error details related to the Compute Node error.") - errorDetails?: NameValuePair[]; -} - -@doc("The endpoint configuration for the Compute Node.") -model ComputeNodeEndpointConfiguration { - @doc("The list of inbound endpoints that are accessible on the Compute Node.") - inboundEndpoints: InboundEndpoint[]; -} - -@doc("An inbound endpoint on a Compute Node.") -model InboundEndpoint { - @doc("The name of the endpoint.") - name: string; - - @doc("The protocol of the endpoint.") - protocol: InboundEndpointProtocol; - - @doc("The public IP address of the Compute Node.") - publicIPAddress: string; - - @doc("The public fully qualified domain name for the Compute Node.") - publicFQDN: string; - - @doc("The public port number of the endpoint.") - frontendPort: int32; - - @doc("The backend port number of the endpoint.") - backendPort: int32; -} - -@doc(""" -The Batch Compute Node agent is a program that runs on each Compute Node in the -Pool and provides Batch capability on the Compute Node. -""") -model NodeAgentInformation { - @doc(""" -This version number can be checked against the Compute Node agent release notes -located at -https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. -""") - version: string; - - @doc(""" -This is the most recent time that the Compute Node agent was updated to a new -version. -""") - lastUpdateTime: utcDateTime; -} - -@doc("Info about the current state of the virtual machine.") -model VirtualMachineInfo { - @doc(""" -A reference to an Azure Virtual Machines Marketplace Image or a Shared Image -Gallery Image. To get the list of all Azure Marketplace Image references -verified by Azure Batch, see the 'List Supported Images' operation. 
-""") - imageReference?: ImageReference; -} - -@doc("Options for rebooting a Compute Node.") -model NodeRebootParameters { - @doc("The default value is requeue.") - nodeRebootOption?: ComputeNodeRebootOption; -} - -@doc("Options for reimaging a Compute Node.") -model NodeReimageParameters { - @doc("The default value is requeue.") - nodeReimageOption?: ComputeNodeReimageOption; -} - -@doc("Options for disabling scheduling on a Compute Node.") -model NodeDisableSchedulingParameters { - @doc("The default value is requeue.") - nodeDisableSchedulingOption?: DisableComputeNodeSchedulingOption; -} - -@doc("The remote login settings for a Compute Node.") -model ComputeNodeGetRemoteLoginSettingsResult { - @doc("The IP address used for remote login to the Compute Node.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - remoteLoginIPAddress: string; - - @doc("The port used for remote login to the Compute Node.") - remoteLoginPort: int32; -} - -@doc("The Azure Batch service log files upload configuration for a Compute Node.") -model UploadBatchServiceLogsConfiguration { - @doc(""" -If a user assigned managed identity is not being used, the URL must include a -Shared Access Signature (SAS) granting write permissions to the container. The -SAS duration must allow enough time for the upload to finish. The start time -for SAS is optional and recommended to not be specified. -""") - containerUrl: string; - - @doc(""" -Any log file containing a log message in the time range will be uploaded. This -means that the operation might retrieve more logs than have been requested -since the entire log file is always uploaded, but the operation should not -retrieve fewer logs than have been requested. -""") - startTime: utcDateTime; - - @doc(""" -Any log file containing a log message in the time range will be uploaded. This -means that the operation might retrieve more logs than have been requested -since the entire log file is always uploaded, but the operation should not -retrieve fewer logs than have been requested. If omitted, the default is to -upload all logs available after the startTime. -""") - endTime?: utcDateTime; - - @doc("The identity must have write access to the Azure Blob Storage container.") - identityReference?: ComputeNodeIdentityReference; -} - -@doc("The result of uploading Batch service log files from a specific Compute Node.") -@resource("pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs") -model UploadBatchServiceLogsResult { - @doc(""" -The virtual directory name is part of the blob name for each log file uploaded, -and it is built based poolId, nodeId and a unique identifier. 
-""") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key - virtualDirectoryName: string; - - @doc("The number of log files which will be uploaded.") - numberOfFilesUploaded: int32; -} - -@doc("The result of listing the Compute Nodes in a Pool.") -@pagedResult -model ComputeNodeListResult { - @doc("The list of Compute Nodes.") - @items - value?: ComputeNode[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} - -@doc("The configuration for virtual machine extension instance view.") -model NodeVMExtension { - @doc("The provisioning state of the virtual machine extension.") - provisioningState?: string; - - @doc("The configuration for virtual machine extensions.") - vmExtension?: VMExtension; - - @doc("The vm extension instance view.") - instanceView?: VMExtensionInstanceView; -} - -@doc("The vm extension instance view.") -model VMExtensionInstanceView { - @doc("The name of the vm extension instance view.") - name?: string; - - @doc("The resource status information.") - statuses?: InstanceViewStatus[]; - - @doc("The resource status information.") - subStatuses?: InstanceViewStatus[]; -} - -@doc("The instance view status.") -model InstanceViewStatus { - @doc("The status code.") - code?: string; - - @doc("The localized label for the status.") - displayStatus?: string; - - @doc("Level code.") - level?: StatusLevelTypes; - - @doc("The detailed status message.") - message?: string; - - @doc("The time of the status.") - time?: string; -} - -@doc("The result of listing the Compute Node extensions in a Node.") -@pagedResult -model NodeVMExtensionList { - @doc("The list of Compute Node extensions.") - @items - value?: NodeVMExtension[]; - - @doc("The URL to get the next set of results.") - @nextLink - "odata.nextLink"?: string; -} diff --git a/packages/typespec-test/test/batch/spec/routes.tsp b/packages/typespec-test/test/batch/spec/routes.tsp deleted file mode 100644 index 3170b718a1..0000000000 --- a/packages/typespec-test/test/batch/spec/routes.tsp +++ /dev/null @@ -1,2423 +0,0 @@ -import "@typespec/rest"; -import "@typespec/versioning"; -import "@azure-tools/typespec-azure-core"; -import "@azure-tools/typespec-autorest"; -import "@typespec/openapi"; -import "@azure-tools/typespec-client-generator-core"; -import "./models.tsp"; - -using TypeSpec.Reflection; -using TypeSpec.Http; -using TypeSpec.Rest; -using Autorest; -using TypeSpec.Versioning; -using Azure.Core; -using OpenAPI; -using Azure.ClientGenerator; - -namespace BatchServiceClient; - -// Interfaces //////////////////// -@Azure.ClientGenerator.Core.operationGroup -@tag("Applications") -interface ApplicationOperations { - @summary("Lists all of the applications available in the specified Account.") - @doc(""" -This operation returns only Applications and versions that are available for -use on Compute Nodes; that is, that can be used in an Package reference. For -administrator information about applications and versions that are not yet -available to Compute Nodes, use the Azure portal or the Azure Resource Manager -API. 
-""") - @example("./examples/ApplicationList.json", "List applications") - @route("/applications") - @operationId("Application_List") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders, - BatchResponseHeaders & ApplicationListResult - >; - - @summary("Gets information about the specified Application.") - @doc(""" -This operation returns only Applications and versions that are available for -use on Compute Nodes; that is, that can be used in an Package reference. For -administrator information about Applications and versions that are not yet -available to Compute Nodes, use the Azure portal or the Azure Resource Manager -API. -""") - @example("./examples/ApplicationGet.json", "Get applications") - Get is Azure.Core.ResourceRead< - Application, - { - parameters: BatchClientRequestHeaders; - response: BatchError; - } - >; -} - -@tag("Pools") -@Azure.ClientGenerator.Core.operationGroup -interface Pool { - @summary(""" -Lists the usage metrics, aggregated by Pool across individual time intervals, -for the specified Account. -""") - @doc(""" -If you do not specify a $filter clause including a poolId, the response -includes all Pools that existed in the Account in the time range of the -returned aggregation intervals. If you do not specify a $filter clause -including a startTime or endTime these filters default to the start and end -times of the last aggregation interval currently available; that is, only the -last aggregation interval is returned. -""") - @example("./examples/PoolListUsageMetrics.json", "Pool list usage metrics") - ListUsageMetrics is Azure.Core.ResourceList< - PoolUsageMetrics, - { - parameters: BatchApplicationListHeaders & - Pool_ListUsageMetricRequestHeaders; - response: BatchResponseHeaders; - } - >; - - @summary("Gets lifetime summary statistics for all of the Pools in the specified Account.") - @doc(""" -Statistics are aggregated across all Pools that have ever existed in the -Account, from Account creation to the last update time of the statistics. The -statistics may not be immediately available. The Batch service performs -periodic roll-up of statistics. The typical delay is about 30 minutes. -""") - @route("/lifetimepoolstats") - @example( - "./examples/PoolGetLifetimeStatistics.json", - "Pool get lifetime statistics" - ) - @get - GetAllLifetimeStatistics is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders, - BatchResponseHeaders & PoolStatistics - >; - - @summary("Adds a Pool to the specified Account.") - @doc(""" -When naming Pools, avoid including sensitive information such as user names or -secret project names. This information may appear in telemetry logs accessible -to Microsoft Support engineers. 
-""") - @route("/pools") - @example( - "./examples/PoolAdd_CloudServiceConfiguration.json", - "Add a CloudServiceConfiguration pool" - ) - @example( - "./examples/PoolAdd_VirtualMachineConfiguration.json", - "Add a VirtualMachineConfiguration pool" - ) - @example( - "./examples/PoolAdd_VirtualMachineConfigurationWithContainers.json", - "Add a VirtualMachineConfiguration pool with containers" - ) - @example( - "./examples/PoolAdd_VirtualMachineConfigurationWithExtensions.json", - "Add a VirtualMachineConfiguration pool with extensions" - ) - @example( - "./examples/PoolAdd_MountConfiguration.json", - "Add a pool with mount drive specified" - ) - @post - Add is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @body - @doc("The Pool to be added.") - pool: BatchPool; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "201"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Lists all of the Pools in the specified Account.") - @doc("Lists all of the Pools in the specified Account.") - @route("/pools") - @example("./examples/PoolList_Basic.json", "Pool list") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. -""") - @query - $filter?: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, - BatchResponseHeaders & BatchPoolListResult - >; - - @summary("Deletes a Pool from the specified Account.") - @doc(""" -When you request that a Pool be deleted, the following actions occur: the Pool -state is set to deleting; any ongoing resize operation on the Pool are stopped; -the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks -running on existing Compute Nodes are terminated and requeued (as if a resize -Pool operation had been requested with the default requeue option); finally, -the Pool is removed from the system. Because running Tasks are requeued, the -user can rerun these Tasks by updating their Job to target a different Pool. -The Tasks can then run on the new Pool. If you want to override the requeue -behavior, then you should call resize Pool explicitly to shrink the Pool to -zero size before deleting the Pool. If you call an Update, Patch or Delete API -on a Pool in the deleting state, it will fail with HTTP status code 409 with -error code PoolBeingDeleted. 
-""") - @example("./examples/PoolDelete.json", "Pool delete") - @route("/pools/{poolId}") - @delete - Delete is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchPoolHeaders, - DeleteResponseHeaders - >; - - @doc("Gets basic properties of a Pool.") - @example("./examples/PoolExists.json", "Check Pool Exists") - @route("/pools/{poolId}") - @head - Exists is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchPoolHeaders, - PoolDoesntExistResponseHeaders - >; - - @doc("Gets information about the specified Pool.") - @example("./examples/PoolGet_Basic.json", "Pool get") - @example( - "./examples/PoolGet_VirtualMachineConfigurationWithExtensions.json", - "Get a VirtualMachineConfiguration pool with extensions" - ) - @route("/pools/{poolId}") - @get - Get is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchPoolHeaders & { - @doc("An OData $select clause.") - @query - $select: string; - - @doc("An OData $expand clause.") - @query - $expand: string; - }, - BatchResponseHeaders & BatchPool - >; - - @summary("Updates the properties of the specified Pool.") - @doc(""" -This only replaces the Pool properties specified in the request. For example, -if the Pool has a StartTask associated with it, and a request does not specify -a StartTask element, then the Pool keeps the existing StartTask. -""") - @example("./examples/PoolPatch.json", "Patch the Pool") - @route("/pools/{poolId}") - @patch - Patch is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchPoolHeaders & { - @doc("The parameters for the request.") - @body - poolUpdate: BatchPool; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Disables automatic scaling for a Pool.") - @doc("Disables automatic scaling for a Pool.") - @example("./examples/PoolDisableAutoScale.json", "Disable pool autoscale") - @route("/pools/{poolId}/disableautoscale") - @post - DisableAutoScale is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool on which to disable automatic scaling.") - @path - poolId: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Enables automatic scaling for a Pool.") - @doc(""" -You cannot enable automatic scaling on a Pool if a resize operation is in -progress on the Pool. If automatic scaling of the Pool is currently disabled, -you must specify a valid autoscale formula as part of the request. If automatic -scaling of the Pool is already enabled, you may specify a new autoscale formula -and/or a new evaluation interval. You cannot call this API for the same Pool -more than once every 30 seconds. 
-""") - @example("./examples/PoolEnableAutoscale.json", "Pool enable autoscale") - @route("/pools/{poolId}/enableautoscale") - @post - EnableAutoScale is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchPoolHeaders & { - @doc("The parameters for the request.") - @body - parameters: BatchPoolEnableAutoScaleParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Gets the result of evaluating an automatic scaling formula on the Pool.") - @doc(""" -This API is primarily for validating an autoscale formula, as it simply returns -the result without applying the formula to the Pool. The Pool must have auto -scaling enabled in order to evaluate a formula. -""") - @example("./examples/PoolEvaluateAutoscale.json", "Pool evaluate autoscale") - @route("/pools/{poolId}/evaluateautoscale") - @post - EvaluateAutoScale is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool on which to evaluate the automatic scaling formula.") - @path - poolId: string; - - @doc("The parameters for the request.") - @body - parameters: BatchPoolEvaluateAutoScaleParameters; - }, - BatchResponseHeaders & - AutoScaleRun & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Changes the number of Compute Nodes that are assigned to a Pool.") - @doc(""" -You can only resize a Pool when its allocation state is steady. If the Pool is -already resizing, the request fails with status code 409. When you resize a -Pool, the Pool's allocation state changes from steady to resizing. You cannot -resize Pools which are configured for automatic scaling. If you try to do this, -the Batch service returns an error 409. If you resize a Pool downwards, the -Batch service chooses which Compute Nodes to remove. To remove specific Compute -Nodes, use the Pool remove Compute Nodes API instead. -""") - @example("./examples/PoolResize.json", "Pool resize") - @route("/pools/{poolId}/resize") - @post - Resize is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchPoolHeaders & { - @doc("The parameters for the request.") - @body - parameters: BatchPoolResizeParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Stops an ongoing resize operation on the Pool.") - @doc(""" -This does not restore the Pool to its previous state before the resize -operation: it only stops any further changes being made, and the Pool maintains -its current state. After stopping, the Pool stabilizes at the number of Compute -Nodes it was at when the stop operation was done. During the stop operation, -the Pool allocation state changes first to stopping and then to steady. A -resize operation need not be an explicit resize Pool request; this API can also -be used to halt the initial sizing of the Pool when it is created. 
-""") - @example("./examples/PoolStopResize.json", "Pool stop resize") - @route("/pools/{poolId}/stopresize") - @post - StopResize is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchPoolHeaders, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Updates the properties of the specified Pool.") - @doc(""" -This fully replaces all the updatable properties of the Pool. For example, if -the Pool has a StartTask associated with it and if StartTask is not specified -with this request, then the Batch service will remove the existing StartTask. -""") - @example("./examples/PoolUpdate.json", "Pool update") - @route("/pools/{poolId}/updateproperties") - @post - UpdateProperties is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool to update.") - @path - poolId: string; - - @doc("The parameters for the request.") - @body - poolUpdatePropertiesParameter: BatchPool; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Removes Compute Nodes from the specified Pool.") - @doc(""" -This operation can only run when the allocation state of the Pool is steady. -When this operation runs, the allocation state changes from steady to resizing. -Each request may remove up to 100 nodes. -""") - @example("./examples/PoolRemoveNodes.json", "Pool remove nodes") - @route("/pools/{poolId}/removenodes") - @post - RemoveNodes is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchPoolHeaders & { - @doc("The parameters for the request.") - @body - parameters: NodeRemoveParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; -} - -@tag("Accounts") -@Azure.ClientGenerator.Core.operationGroup -interface Account { - @summary("Lists all Virtual Machine Images supported by the Azure Batch service.") - @doc("Lists all Virtual Machine Images supported by the Azure Batch service.") - @example( - "./examples/AccountListSupportedImages.json", - "Account list node agent skus" - ) - @route("/supportedimages") - @get - ListSupportedImages is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. -""") - @query - $filter?: string; - }, - BatchResponseHeaders & AccountListSupportedImagesResult - >; - - @doc(""" -Gets the number of Compute Nodes in each state, grouped by Pool. Note that the -numbers returned may not always be up to date. If you need exact node counts, -use a list query. -""") - @example("./examples/AccountListPoolNodeCounts.json", "NodeCountsPayload") - @route("/nodecounts") - @get - ListPoolNodeCounts is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. 
-""") - @query - $filter?: string; - }, - BatchClientResponseHeaders & PoolNodeCountsListResult - >; -} - -@tag("Jobs") -@Azure.ClientGenerator.Core.operationGroup -interface Job { - @summary("Gets lifetime summary statistics for all of the Jobs in the specified Account.") - @doc(""" -Statistics are aggregated across all Jobs that have ever existed in the -Account, from Account creation to the last update time of the statistics. The -statistics may not be immediately available. The Batch service performs -periodic roll-up of statistics. The typical delay is about 30 minutes. -""") - @example( - "./examples/JobGetLifetimeStatistics.json", - "Job get lifetime statistics" - ) - @route("/lifetimejobstats") - @get - GetAllLifetimeStatistics is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders, - BatchResponseHeaders & JobStatistics - >; - - @summary("Deletes a Job.") - @doc(""" -Deleting a Job also deletes all Tasks that are part of that Job, and all Job -statistics. This also overrides the retention period for Task data; that is, if -the Job contains Tasks which are still retained on Compute Nodes, the Batch -services deletes those Tasks' working directories and all their contents. When -a Delete Job request is received, the Batch service sets the Job to the -deleting state. All update operations on a Job that is in deleting state will -fail with status code 409 (Conflict), with additional information indicating -that the Job is being deleted. -""") - @example("./examples/JobDelete.json", "Delete Job") - @route("/jobs/{jobId}") - @delete - Delete is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job to delete.") - @path - jobId: string; - }, - DeleteResponseHeaders - >; - - @summary("Gets information about the specified Job.") - @doc("Gets information about the specified Job.") - @example("./examples/JobGet.json", "Job get") - @route("/jobs/{jobId}") - @get - Get is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job.") - @path - jobId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, - BatchResponseHeaders & BatchJob - >; - - @summary("Updates the properties of the specified Job.") - @doc(""" -This replaces only the Job properties specified in the request. For example, if -the Job has constraints, and a request does not specify the constraints -element, then the Job keeps the existing constraints. -""") - @example("./examples/JobPatch.json", "Job patch") - @route("/jobs/{jobId}") - @patch - Patch is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job whose properties you want to update.") - @path - jobId: string; - - @doc("The parameters for the request.") - @body - jobUpdate: BatchJob; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Updates the properties of the specified Job.") - @doc(""" -This fully replaces all the updatable properties of the Job. For example, if -the Job has constraints associated with it and if constraints is not specified -with this request, then the Batch service will remove the existing constraints. 
-""") - @example("./examples/JobUpdate.json", "Job update") - @route("/jobs/{jobId}") - @put - Update is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job whose properties you want to update.") - @path - jobId: string; - - @doc("The parameters for the request.") - @body - job: BatchJob; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Disables the specified Job, preventing new Tasks from running.") - @doc(""" -The Batch Service immediately moves the Job to the disabling state. Batch then -uses the disableTasks parameter to determine what to do with the currently -running Tasks of the Job. The Job remains in the disabling state until the -disable operation is completed and all Tasks have been dealt with according to -the disableTasks option; the Job then moves to the disabled state. No new Tasks -are started under the Job until it moves back to active state. If you try to -disable a Job that is in any state other than active, disabling, or disabled, -the request fails with status code 409. -""") - @example("./examples/JobDisable.json", "Job disable") - @route("/jobs/{jobId}/disable") - @post - Disable is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job to disable.") - @path - jobId: string; - - @doc("The parameters for the request.") - @body - parameters: BatchJobDisableParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "202"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Enables the specified Job, allowing new Tasks to run.") - @doc(""" -When you call this API, the Batch service sets a disabled Job to the enabling -state. After the this operation is completed, the Job moves to the active -state, and scheduling of new Tasks under the Job resumes. The Batch service -does not allow a Task to remain in the active state for more than 180 days. -Therefore, if you enable a Job containing active Tasks which were added more -than 180 days ago, those Tasks will not run. -""") - @example("./examples/JobEnable.json", "Job enable") - @route("/jobs/{jobId}/enable") - @post - Enable is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job to enable.") - @path - jobId: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "202"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Terminates the specified Job, marking it as completed.") - @doc(""" -When a Terminate Job request is received, the Batch service sets the Job to the -terminating state. The Batch service then terminates any running Tasks -associated with the Job and runs any required Job release Tasks. Then the Job -moves into the completed state. If there are any Tasks in the Job in the active -state, they will remain in the active state. Once a Job is terminated, new -Tasks cannot be added and any remaining active Tasks will not be scheduled. 
-""") - @example("./examples/JobTerminate.json", "Job terminate") - @route("/jobs/{jobId}/terminate") - @post - Terminate is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job to terminate.") - @path - jobId: string; - - @doc("The parameters for the request.") - @body - parameters: BatchJobTerminateParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "202"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Adds a Job to the specified Account.") - @doc(""" -The Batch service supports two ways to control the work done as part of a Job. -In the first approach, the user specifies a Job Manager Task. The Batch service -launches this Task when it is ready to start the Job. The Job Manager Task -controls all other Tasks that run under this Job, by using the Task APIs. In -the second approach, the user directly controls the execution of Tasks under an -active Job, by using the Task APIs. Also note: when naming Jobs, avoid -including sensitive information such as user names or secret project names. -This information may appear in telemetry logs accessible to Microsoft Support -engineers. -""") - @example("./examples/JobAdd_Basic.json", "Add a basic job") - @example("./examples/JobAdd_Complex.json", "Add a complex job") - @route("/jobs") - @post - Add is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The Job to be added.") - @body - job: BatchJob; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "201"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - DataServiceId: string; - } - >; - - @summary("Lists all of the Jobs in the specified Account.") - @doc("Lists all of the Jobs in the specified Account.") - @example("./examples/JobList.json", "Job list") - @route("/jobs") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. -""") - @query - $filter: string; - - @doc("An OData $select clause.") - @query - $select: string; - - @doc("An OData $expand clause.") - @query - $expand: string; - }, - BatchResponseHeaders & BatchJobListResult - >; - - @summary("Lists the Jobs that have been created under the specified Job Schedule.") - @doc("Lists the Jobs that have been created under the specified Job Schedule.") - @example( - "./examples/JobListFromJobSchedule.json", - "List Job Under Job Schedule" - ) - @route("/jobschedules/{jobScheduleId}/jobs") - @get - ListFromJobSchedule is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Job Schedule from which you want to get a list of Jobs.") - @path - jobScheduleId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. 
-""") - @query - $filter: string; - - @doc("An OData $select clause.") - @query - $select: string; - - @doc("An OData $expand clause.") - @query - $expand: string; - }, - BatchResponseHeaders & BatchJobListResult - >; - - @summary(""" -Lists the execution status of the Job Preparation and Job Release Task for the -specified Job across the Compute Nodes where the Job has run. -""") - @doc(""" -This API returns the Job Preparation and Job Release Task status on all Compute -Nodes that have run the Job Preparation or Job Release Task. This includes -Compute Nodes which have since been removed from the Pool. If this API is -invoked on a Job which has no Job Preparation or Job Release Task, the Batch -service returns HTTP status code 409 (Conflict) with an error code of -JobPreparationTaskNotSpecified. -""") - @example( - "./examples/JobListPreparationAndReleaseTaskStatus.json", - "Job list preparation and release task status" - ) - @route("/jobs/{jobId}/jobpreparationandreleasetaskstatus") - @get - ListPreparationAndReleaseTaskStatus is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Job.") - @path - jobId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. -""") - @query - $filter: string; - - @doc("An OData $select clause.") - @query - $select: string; - }, - BatchResponseHeaders & BatchJobListPreparationAndReleaseTaskStatusResult - >; - - @summary("Gets the Task counts for the specified Job.") - @doc(""" -Task counts provide a count of the Tasks by active, running or completed Task -state, and a count of Tasks which succeeded or failed. Tasks in the preparing -state are counted as running. Note that the numbers returned may not always be -up to date. If you need exact task counts, use a list query. -""") - @example("./examples/JobGetTaskCounts.json", "Job get task counts") - @route("/jobs/{jobId}/taskcounts") - @get - GetTaskCounts is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Job.") - @path - jobId: string; - }, - BatchClientResponseHeaders & TaskCountsResult - >; -} - -@Azure.ClientGenerator.Core.operationGroup -@tag("Certificates") -interface CertificateOperations { - @summary("Adds a Certificate to the specified Account.") - @doc("Adds a Certificate to the specified Account.") - @example("./examples/CertificateAdd.json", "Certificate add") - @route("/certificates") - @post - Add is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The Certificate to be added.") - @body - certificate: Certificate; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "201"; - } - >; - - @summary("Lists all of the Certificates that have been added to the specified Account.") - @doc("Lists all of the Certificates that have been added to the specified Account.") - @example("./examples/CertificateList.json", "Certificate list") - @route("/certificates") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. 
-""") - @query - $filter: string; - - @doc("An OData $select clause.") - @query - $select: string; - }, - BatchResponseHeaders & CertificateListResult - >; - - @summary("Cancels a failed deletion of a Certificate from the specified Account.") - @doc(""" -If you try to delete a Certificate that is being used by a Pool or Compute -Node, the status of the Certificate changes to deleteFailed. If you decide that -you want to continue using the Certificate, you can use this operation to set -the status of the Certificate back to active. If you intend to delete the -Certificate, you do not need to run this operation after the deletion failed. -You must make sure that the Certificate is not being used by any resources, and -then you can try again to delete the Certificate. -""") - @example( - "./examples/CertificateCancelDelete.json", - "Certificate cancel delete" - ) - @route("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete") - @post - CancelDeletion is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The algorithm used to derive the thumbprint parameter. This must be sha1.") - @path - thumbprintAlgorithm: string; - - @doc("The thumbprint of the Certificate being deleted.") - @path - thumbprint: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "204"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Deletes a Certificate from the specified Account.") - @doc(""" -You cannot delete a Certificate if a resource (Pool or Compute Node) is using -it. Before you can delete a Certificate, you must therefore make sure that the -Certificate is not associated with any existing Pools, the Certificate is not -installed on any Nodes (even if you remove a Certificate from a Pool, it is not -removed from existing Compute Nodes in that Pool until they restart), and no -running Tasks depend on the Certificate. If you try to delete a Certificate -that is in use, the deletion fails. The Certificate status changes to -deleteFailed. You can use Cancel Delete Certificate to set the status back to -active if you decide that you want to continue using the Certificate. -""") - @example("./examples/CertificateDelete.json", "Certificate delete") - @route("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})") - @delete - Delete is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The algorithm used to derive the thumbprint parameter. This must be sha1.") - @path - thumbprintAlgorithm: string; - - @doc("The thumbprint of the Certificate to be deleted.") - @path - thumbprint: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "202"; - } - >; - - @doc("Gets information about the specified Certificate.") - @example("./examples/CertificateGet.json", "Certificate get") - @route("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})") - @get - Get is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The algorithm used to derive the thumbprint parameter. 
This must be sha1.") - @path - thumbprintAlgorithm: string; - - @doc("The thumbprint of the Certificate to get.") - @path - thumbprint: string; - - @doc("An OData $select clause.") - @query - $select: string; - }, - BatchResponseHeaders & Certificate - >; -} - -@Azure.ClientGenerator.Core.operationGroup -@tag("Files") -interface File { - @summary("Deletes the specified Task file from the Compute Node where the Task ran.") - @doc("Deletes the specified Task file from the Compute Node where the Task ran.") - @example("./examples/FileDeleteFromTask.json", "File delete from task") - @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}") - @delete - DeleteFromTask is Azure.Core.Foundations.Operation< - BatchJobFileClientRequestHeaders & { - @doc(""" -Whether to delete children of a directory. If the filePath parameter represents -a directory instead of a file, you can set recursive to true to delete the -directory and all of the files and subdirectories in it. If recursive is false -then the directory must be empty or deletion will fail. -""") - @query - recursive: boolean; - }, - BatchClientResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - } - >; - - @doc("Returns the content of the specified Task file.") - @example("./examples/FileGetFromTask.json", "Get File From Task") - @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}") - @get - GetFromTask is Azure.Core.Foundations.Operation< - BatchJobFileClientRequestHeaders & - BatchModifiedSinceHeaders & { - @doc(""" -The byte range to be retrieved. The default is to retrieve the entire file. The -format is bytes=startRange-endRange. -""") - @header - "ocp-range"?: string; - }, - BatchResponseHeaders & - FileResponse & { - @header("content-type") contentType: "application/octet-stream"; - - @body - @doc("A response containing the file content.") - file: bytes; - } - >; - - @doc("Gets the properties of the specified Task file.") - @example( - "./examples/FileGetPropertiesFromTask.json", - "File get properties from task" - ) - @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}") - @head - GetPropertiesFromTask is Azure.Core.Foundations.Operation< - BatchJobFileClientRequestHeaders & BatchModifiedSinceHeaders, - BatchResponseHeaders & FileResponse - >; - - @summary("Deletes the specified file from the Compute Node.") - @doc("Deletes the specified file from the Compute Node.") - @example("./examples/FileDeleteFromNode.json", "File delete from node") - @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}") - @delete - DeleteFromComputeNode is Azure.Core.Foundations.Operation< - BatchPoolFileClientRequestHeaders & { - @doc(""" -Whether to delete children of a directory. If the filePath parameter represents -a directory instead of a file, you can set recursive to true to delete the -directory and all of the files and subdirectories in it. If recursive is false -then the directory must be empty or deletion will fail. -""") - @query - recursive?: boolean; - }, - BatchClientResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - } - >; - - @doc("Returns the content of the specified Compute Node file.") - @example("./examples/FileGetFromNode.json", "Get File From Compute Node") - @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}") - @get - GetFromComputeNode is Azure.Core.Foundations.Operation< - BatchPoolFileClientRequestHeaders & - BatchModifiedSinceHeaders & { - @doc(""" -The byte range to be retrieved. The default is to retrieve the entire file. 
The -format is bytes=startRange-endRange. -""") - @header - "ocp-range"?: string; - }, - BatchResponseHeaders & - FileResponse & { - @header("content-type") contentType: "application/octet-stream"; - - @body - @doc("A response containing the file content.") - file: bytes; - } - >; - - @doc("Gets the properties of the specified Compute Node file.") - @example( - "./examples/FileGetPropertiesFromNode.json", - "File get properties from node" - ) - @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}") - @head - GetPropertiesFromComputeNode is Azure.Core.Foundations.Operation< - BatchPoolFileClientRequestHeaders & BatchModifiedSinceHeaders, - BatchResponseHeaders & FileResponse - >; - - @summary("Lists the files in a Task's directory on its Compute Node.") - @doc("Lists the files in a Task's directory on its Compute Node.") - @example("./examples/FileListFromTask.json", "File list from task") - @route("/jobs/{jobId}/tasks/{taskId}/files") - @get - ListFromTask is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Job that contains the Task.") - @path - jobId: string; - - @doc("The ID of the Task whose files you want to list.") - @path - taskId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. -""") - @query - $filter: string; - - @doc(""" -Whether to list children of the Task directory. This parameter can be used in -combination with the filter parameter to list specific type of files. -""") - @query - recursive: boolean; - }, - BatchResponseHeaders & NodeFileListResult - >; - - @summary("Lists all of the files in Task directories on the specified Compute Node.") - @doc("Lists all of the files in Task directories on the specified Compute Node.") - @example("./examples/FileListFromNode.json", "File list from node") - @route("/pools/{poolId}/nodes/{nodeId}/files") - @get - ListFromComputeNode is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node whose files you want to list.") - @path - nodeId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. -""") - @query - $filter: string; - - @doc("Whether to list children of a directory.") - @query - recursive: boolean; - }, - BatchResponseHeaders & NodeFileListResult - >; -} - -@Azure.ClientGenerator.Core.operationGroup -@tag("JobSchedules") -interface JobSchedule { - @summary("Checks the specified Job Schedule exists.") - @doc("Checks the specified Job Schedule exists.") - @example("./examples/JobScheduleExists.json", "Check Job Schedule Exists") - @route("/jobschedules/{jobScheduleId}") - @head - Exists is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule which you want to check.") - @path - jobScheduleId: string; - }, - (BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - }) | { - @doc("The Job Schedule does not exist.") - @statusCode - code: "204"; - } - >; - - @summary("Deletes a Job Schedule from the specified Account.") - @doc(""" -When you delete a Job Schedule, this also deletes all Jobs and Tasks under that -schedule. 
When Tasks are deleted, all the files in their working directories on -the Compute Nodes are also deleted (the retention period is ignored). The Job -Schedule statistics are no longer accessible once the Job Schedule is deleted, -though they are still counted towards Account lifetime statistics. -""") - @example("./examples/JobScheduleDelete.json", "JobSchedule delete") - @route("/jobschedules/{jobScheduleId}") - @delete - Delete is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule to delete.") - @path - jobScheduleId: string; - }, - DeleteResponseHeaders - >; - - @doc("Gets information about the specified Job Schedule.") - @example("./examples/JobScheduleGet.json", "JobSchedule get") - @route("/jobschedules/{jobScheduleId}") - @get - Get is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule to get.") - @path - jobScheduleId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, - BatchResponseHeaders & BatchJobSchedule - >; - - @summary("Updates the properties of the specified Job Schedule.") - @doc(""" -This replaces only the Job Schedule properties specified in the request. For -example, if the schedule property is not specified with this request, then the -Batch service will keep the existing schedule. Changes to a Job Schedule only -impact Jobs created by the schedule after the update has taken place; currently -running Jobs are unaffected. -""") - @example("./examples/JobSchedulePatch.json", "JobSchedule patch") - @route("/jobschedules/{jobScheduleId}") - @patch - Patch is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule to update.") - @path - jobScheduleId: string; - - @doc("The parameters for the request.") - @body - jobScheduleUpdate: BatchJobSchedule; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Updates the properties of the specified Job Schedule.") - @doc(""" -This fully replaces all the updatable properties of the Job Schedule. For -example, if the schedule property is not specified with this request, then the -Batch service will remove the existing schedule. Changes to a Job Schedule only -impact Jobs created by the schedule after the update has taken place; currently -running Jobs are unaffected. 
-""") - @example("./examples/JobScheduleUpdate.json", "JobSchedule update") - @route("/jobschedules/{jobScheduleId}") - @put - Update is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule to update.") - @path - jobScheduleId: string; - - @doc("The parameters for the request.") - @body - jobSchedule: BatchJobSchedule; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Disables a Job Schedule.") - @doc("No new Jobs will be created until the Job Schedule is enabled again.") - @example("./examples/JobScheduleDisable.json", "JobSchedule disable") - @route("/jobschedules/{jobScheduleId}/disable") - @post - Disable is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule to disable.") - @path - jobScheduleId: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "204"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Enables a Job Schedule.") - @doc("Enables a Job Schedule.") - @example("./examples/JobScheduleEnable.json", "JobSchedule enable") - @route("/jobschedules/{jobScheduleId}/enable") - @post - Enable is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule to enable.") - @path - jobScheduleId: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "204"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Terminates a Job Schedule.") - @doc("Terminates a Job Schedule.") - @example("./examples/JobScheduleTerminate.json", "JobSchedule terminate") - @route("/jobschedules/{jobScheduleId}/terminate") - @post - Terminate is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job Schedule to terminates.") - @path - jobScheduleId: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "202"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Adds a Job Schedule to the specified Account.") - @doc("Adds a Job Schedule to the specified Account.") - @example("./examples/JobScheduleAdd_Basic.json", "Add a basic JobSchedule") - @example( - "./examples/JobScheduleAdd_Complex.json", - "Add a complex JobScheduleAdd" - ) - @route("/jobschedules") - @post - Add is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The Job Schedule to be added.") - @body - jobSchedule: BatchJobSchedule; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "201"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Lists all of the Job Schedules in the specified Account.") - @doc("Lists all of the Job Schedules in the specified Account.") - @example("./examples/JobScheduleList.json", "JobSchedule list") - @route("/jobschedules") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" -An OData $filter 
clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. -""") - @query - $filter?: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, - BatchResponseHeaders & BatchJobScheduleListResult - >; -} - -@Azure.ClientGenerator.Core.operationGroup -@tag("Tasks") -interface Task { - @summary("Adds a Task to the specified Job.") - @doc(""" -The maximum lifetime of a Task from addition to completion is 180 days. If a -Task has not completed within 180 days of being added it will be terminated by -the Batch service and left in whatever state it was in at that time. -""") - @example("./examples/TaskAdd_Basic.json", "Add a basic task") - @example( - "./examples/TaskAdd_ExitConditions.json", - "Add a task with exit conditions" - ) - @example( - "./examples/TaskAdd_ContainerSettings.json", - "Add a task with container settings" - ) - @example( - "./examples/TaskAdd_RequiredSlots.json", - "Add a task with extra slot requirement" - ) - @route("/jobs/{jobId}/tasks") - @post - Add is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Job to which the Task is to be added.") - @path - jobId: string; - - @doc("The Task to be added.") - @body - task: BatchTask; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "201"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Lists all of the Tasks that are associated with the specified Job.") - @doc(""" -For multi-instance Tasks, information such as affinityId, executionInfo and -nodeInfo refer to the primary Task. Use the list subtasks API to retrieve -information about subtasks. -""") - @example("./examples/TaskList.json", "Task list") - @route("/jobs/{jobId}/tasks") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Job.") - @path - jobId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. -""") - @query - $filter?: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, - BatchResponseHeaders & BatchTaskListResult - >; - - @summary("Adds a collection of Tasks to the specified Job.") - @doc(""" -Note that each Task must have a unique ID. The Batch service may not return the -results for each Task in the same order the Tasks were submitted in this -request. If the server times out or the connection is closed during the -request, the request may have been partially or fully processed, or not at all. -In such cases, the user should re-issue the request. Note that it is up to the -user to correctly handle failures when re-issuing a request. For example, you -should use the same Task IDs during a retry so that if the prior operation -succeeded, the retry will not create extra Tasks unexpectedly. If the response -contains any Tasks which failed to add, a client can retry the request. In a -retry, it is most efficient to resubmit only Tasks that failed to add, and to -omit Tasks that were successfully added on the first attempt. The maximum -lifetime of a Task from addition to completion is 180 days. 
If a Task has not -completed within 180 days of being added it will be terminated by the Batch -service and left in whatever state it was in at that time. -""") - @example( - "./examples/TaskAddCollection_Basic.json", - "Add a basic collection of tasks" - ) - @example( - "./examples/TaskAddCollection_Complex.json", - "Add a complex collection of tasks" - ) - @route("/jobs/{jobId}/addtaskcollection") - @post - AddCollection is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Job to which the Task collection is to be added.") - @path - jobId: string; - - @doc("The Tasks to be added.") - @body - taskCollection: BatchTaskCollection; - }, - BatchClientResponseHeaders & TaskAddCollectionResult - >; - - @summary("Deletes a Task from the specified Job.") - @doc(""" -When a Task is deleted, all of the files in its directory on the Compute Node -where it ran are also deleted (regardless of the retention time). For -multi-instance Tasks, the delete Task operation applies synchronously to the -primary task; subtasks and their files are then deleted asynchronously in the -background. -""") - @example("./examples/TaskDelete.json", "Task delete") - @route("/jobs/{jobId}/tasks/{taskId}") - @delete - Delete is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job from which to delete the Task.") - @path - jobId: string; - - @doc("The ID of the Task to delete.") - @path - taskId: string; - }, - BatchClientResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - } - >; - - @summary("Gets information about the specified Task.") - @doc(""" -For multi-instance Tasks, information such as affinityId, executionInfo and -nodeInfo refer to the primary Task. Use the list subtasks API to retrieve -information about subtasks. -""") - @example("./examples/TaskGet.json", "Task get") - @route("/jobs/{jobId}/tasks/{taskId}") - @get - Get is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job that contains the Task.") - @path - jobId: string; - - @doc("The ID of the Task to get information about.") - @path - taskId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, - BatchResponseHeaders & - BatchTask & { - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @doc("Updates the properties of the specified Task.") - @example("./examples/TaskUpdate.json", "Task update") - @route("/jobs/{jobId}/tasks/{taskId}") - @put - Update is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job containing the Task.") - @path - jobId: string; - - @doc("The ID of the Task to update.") - @path - taskId: string; - - @doc("The parameters for the request.") - @body - task: BatchTask; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary(""" -Lists all of the subtasks that are associated with the specified multi-instance -Task. 
-""") - @doc("If the Task is not a multi-instance Task then this returns an empty collection.") - @example("./examples/TaskListSubtasks.json", "Task list subtasks") - @route("/jobs/{jobId}/tasks/{taskId}/subtasksinfo") - @get - ListSubtasks is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Job.") - @path - jobId: string; - - @doc("The ID of the Task.") - @path - taskId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - }, - BatchResponseHeaders & BatchTaskListSubtasksResult - >; - - @summary("Terminates the specified Task.") - @doc(""" -When the Task has been terminated, it moves to the completed state. For -multi-instance Tasks, the terminate Task operation applies synchronously to the -primary task; subtasks are then terminated asynchronously in the background. -""") - @example("./examples/TaskTerminate.json", "Task terminate") - @route("/jobs/{jobId}/tasks/{taskId}/terminate") - @post - Terminate is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job containing the Task.") - @path - jobId: string; - - @doc("The ID of the Task to terminate.") - @path - taskId: string; - }, - BatchResponseHeaders & { - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary(""" -Reactivates a Task, allowing it to run again even if its retry count has been -exhausted. -""") - @doc(""" -Reactivation makes a Task eligible to be retried again up to its maximum retry -count. The Task's state is changed to active. As the Task is no longer in the -completed state, any previous exit code or failure information is no longer -available after reactivation. Each time a Task is reactivated, its retry count -is reset to 0. Reactivation will fail for Tasks that are not completed or that -previously completed successfully (with an exit code of 0). Additionally, it -will fail if the Job has completed (or is terminating or deleting). -""") - @example("./examples/TaskReactivate.json", "Task reactivate") - @route("/jobs/{jobId}/tasks/{taskId}/reactivate") - @post - Reactivate is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & - BatchMatchHeaders & { - @doc("The ID of the Job containing the Task.") - @path - jobId: string; - - @doc("The ID of the Task to reactivate.") - @path - taskId: string; - }, - BatchResponseHeaders & { - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; -} - -@Azure.ClientGenerator.Core.operationGroup -@tag("ComputeNodes") -interface ComputeNodeOperations { - @summary("Adds a user Account to the specified Compute Node.") - @doc(""" -You can add a user Account to a Compute Node only when it is in the idle or -running state. 
-""") - @example("./examples/NodeAddUser.json", "Node add user") - @route("/pools/{poolId}/nodes/{nodeId}/users") - @post - AddUser is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the machine on which you want to create a user Account.") - @path - nodeId: string; - - @doc("The user Account to be created.") - @body - user: ComputeNodeUser; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "201"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Deletes a user Account from the specified Compute Node.") - @doc(""" -You can delete a user Account to a Compute Node only when it is in the idle or -running state. -""") - @example("./examples/NodeDeleteUser.json", "Node delete user") - @route("/pools/{poolId}/nodes/{nodeId}/users/{userName}") - @delete - DeleteUser is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the machine on which you want to delete a user Account.") - @path - nodeId: string; - - @doc("The name of the user Account to delete.") - @path - userName: string; - }, - BatchClientResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - } - >; - - @summary(""" -Updates the password and expiration time of a user Account on the specified -Compute Node. -""") - @doc(""" -This operation replaces of all the updatable properties of the Account. For -example, if the expiryTime element is not specified, the current value is -replaced with the default value, not left unmodified. You can update a user -Account on a Compute Node only when it is in the idle or running state. 
-""") - @example("./examples/NodeUpdateUser.json", "Node update user") - @route("/pools/{poolId}/nodes/{nodeId}/users/{userName}") - @put - UpdateUser is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the machine on which you want to update a user Account.") - @path - nodeId: string; - - @doc("The name of the user Account to update.") - @path - userName: string; - - @doc("The parameters for the request.") - @body - parameters: NodeUpdateUserParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Gets information about the specified Compute Node.") - @doc("Gets information about the specified Compute Node.") - @example("./examples/NodeGet_Basic.json", "Node get") - @route("/pools/{poolId}/nodes/{nodeId}") - @get - Get is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node that you want to get information about.") - @path - nodeId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - }, - BatchResponseHeaders & ComputeNode - >; - - @summary("Restarts the specified Compute Node.") - @doc("You can restart a Compute Node only if it is in an idle or running state.") - @example("./examples/NodeReboot.json", "Node reboot") - @route("/pools/{poolId}/nodes/{nodeId}/reboot") - @post - Reboot is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node that you want to restart.") - @path - nodeId: string; - - @doc("The parameters for the request.") - @body - parameters: NodeRebootParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "202"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Reinstalls the operating system on the specified Compute Node.") - @doc(""" -You can reinstall the operating system on a Compute Node only if it is in an -idle or running state. This API can be invoked only on Pools created with the -cloud service configuration property. -""") - @example("./examples/NodeReimage.json", "Node reimage") - @route("/pools/{poolId}/nodes/{nodeId}/reimage") - @post - Reimage is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node that you want to restart.") - @path - nodeId: string; - - @doc("The parameters for the request.") - @body - parameters: NodeReimageParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "202"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Disables Task scheduling on the specified Compute Node.") - @doc(""" -You can disable Task scheduling on a Compute Node only if its current -scheduling state is enabled. 
-""") - @example("./examples/NodeDisableScheduling.json", "Node disable scheduling") - @route("/pools/{poolId}/nodes/{nodeId}/disablescheduling") - @post - DisableScheduling is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node on which you want to disable Task scheduling.") - @path - nodeId: string; - - @doc("The parameters for the request.") - @body - parameters: NodeDisableSchedulingParameters; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Enables Task scheduling on the specified Compute Node.") - @doc(""" -You can enable Task scheduling on a Compute Node only if its current scheduling -state is disabled -""") - @example("./examples/NodeEnableScheduling.json", "Node enable scheduling") - @route("/pools/{poolId}/nodes/{nodeId}/enablescheduling") - @post - EnableScheduling is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node on which you want to enable Task scheduling.") - @path - nodeId: string; - }, - BatchResponseHeaders & { - @doc("A process exit code.") - @statusCode - code: "200"; - - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } - >; - - @summary("Gets the settings required for remote login to a Compute Node.") - @doc(""" -Before you can remotely login to a Compute Node using the remote login -settings, you must create a user Account on the Compute Node. This API can be -invoked only on Pools created with the virtual machine configuration property. -For Pools created with a cloud service configuration, see the GetRemoteDesktop -API. -""") - @example( - "./examples/NodeGetRemoteLoginSettings.json", - "Node get remote login settings" - ) - @route("/pools/{poolId}/nodes/{nodeId}/remoteloginsettings") - @get - GetRemoteLoginSettings is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node for which to obtain the remote login settings.") - @path - nodeId: string; - }, - BatchResponseHeaders & ComputeNodeGetRemoteLoginSettingsResult - >; - - @summary("Gets the Remote Desktop Protocol file for the specified Compute Node.") - @doc(""" -Before you can access a Compute Node by using the RDP file, you must create a -user Account on the Compute Node. This API can only be invoked on Pools created -with a cloud service configuration. For Pools created with a virtual machine -configuration, see the GetRemoteLoginSettings API. -""") - @example( - "./examples/NodeGetRemoteDesktop.json", - "Get RDP file of the compute node" - ) - @route("/pools/{poolId}/nodes/{nodeId}/rdp") - @get - GetRemoteDesktop is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc(""" -The ID of the Compute Node for which you want to get the Remote Desktop -Protocol file. 
-""") - @path - nodeId: string; - }, - BatchResponseHeaders & { - @header("content-type") contentType: "application/octet-stream"; - - @body - @doc("A response containing the file content.") - file: bytes; - } - >; - - @summary(""" -Upload Azure Batch service log files from the specified Compute Node to Azure -Blob Storage. -""") - @doc(""" -This is for gathering Azure Batch service log files in an automated fashion -from Compute Nodes if you are experiencing an error and wish to escalate to -Azure support. The Azure Batch service log files should be shared with Azure -support to aid in debugging issues with the Batch service. -""") - @example( - "./examples/NodeUploadBatchServiceLogs.json", - "Upload BatchService Logs" - ) - @route("/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs") - @post - UploadBatchServiceLogs is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc(""" -The ID of the Compute Node for which you want to get the Remote Desktop -Protocol file. -""") - @path - nodeId: string; - - @doc("The Azure Batch service log files upload configuration.") - @body - uploadBatchServiceLogsConfiguration: UploadBatchServiceLogsConfiguration; - }, - BatchClientResponseHeaders & UploadBatchServiceLogsResult - >; - - @summary("Lists the Compute Nodes in the specified Pool.") - @doc("Lists the Compute Nodes in the specified Pool.") - @example("./examples/NodeList.json", "Node list") - @route("/pools/{poolId}/nodes") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Pool from which you want to list Compute Nodes.") - @path - poolId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. -""") - @query - $filter: string; - - @doc("An OData $select clause.") - @query - $select?: string; - }, - BatchResponseHeaders & ComputeNodeListResult - >; -} - -@Azure.ClientGenerator.Core.operationGroup -@tag("ComputeNodes") -interface ComputeNodeExtensionOperations { - @summary("Gets information about the specified Compute Node Extension.") - @doc("Gets information about the specified Compute Node Extension.") - @example( - "./examples/ComputeNodeExtensionGet.json", - "Get compute node extension" - ) - @route("/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}") - @get - Get is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node that contains the extensions.") - @path - nodeId: string; - - @doc(""" -The name of the of the Compute Node Extension that you want to get information -about. 
-""") - @path - extensionName: string; - - @doc("An OData $select clause.") - @query - $select?: string; - }, - BatchResponseHeaders & NodeVMExtension - >; - - @summary("Lists the Compute Nodes Extensions in the specified Pool.") - @doc("Lists the Compute Nodes Extensions in the specified Pool.") - @example( - "./examples/ComputeNodeExtensionList.json", - "List compute node extensions" - ) - @route("/pools/{poolId}/nodes/{nodeId}/extensions") - @get - List is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Pool that contains Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node that you want to list extensions.") - @path - nodeId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - }, - BatchResponseHeaders & NodeVMExtensionList - >; -} - -// headers //////////////////// - -@doc("Common header values for pool requests") -model FileResponse { - @doc("A process exit code.") - @statusCode - code: "200"; - - @header - @doc("The file creation time.") - @format("date-time-rfc1123") - "ocp-creation-time"?: string; - - @header - @doc("Whether the object represents a directory.") - "ocp-batch-file-isdirectory": boolean; - - @header - @doc("The URL of the file.") - "ocp-batch-file-url": string; - - @header - @doc("The file mode attribute in octal format.") - "ocp-batch-file-mode": string; - - @header - @doc("The length of the file.") - "Content-Length": int64; - - //@header - //@doc("The content type of the file.") - //"content-Type": string; -} - -@doc("Common header values for pool requests") -model BatchPoolHeaders extends BatchMatchHeaders { - @doc("The ID of the Pool to get.") - @path - poolId: string; -} - -@doc("Common header values for pool requests") -model BatchMatchHeaders extends BatchModifiedSinceHeaders { - @doc(""" -An ETag value associated with the version of the resource known to the client. -The operation will be performed only if the resource's current ETag on the -service exactly matches the value specified by the client. -""") - @header - "If-Match"?: string; - - @doc(""" -An ETag value associated with the version of the resource known to the client. -The operation will be performed only if the resource's current ETag on the -service does not match the value specified by the client. -""") - @header - "If-None-Match"?: string; -} - -@doc("Common header values for modified headers") -model BatchModifiedSinceHeaders { - @doc(""" -A timestamp indicating the last modified time of the resource known to the -client. The operation will be performed only if the resource on the service has -been modified since the specified time. -""") - @header - @format("date-time-rfc1123") - "If-Modified-Since"?: string; - - @doc(""" -A timestamp indicating the last modified time of the resource known to the -client. The operation will be performed only if the resource on the service has -not been modified since the specified time. -""") - @header - @format("date-time-rfc1123") - "If-Unmodified-Since"?: string; -} - -// This is the standard set of headers that most of batch apis return -alias BatchResponseHeaders = BatchClientResponseHeaders & - BatchEtagResponseHeaders; - -@doc("Etag related response header values") -model BatchEtagResponseHeaders { - @doc("The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. 
In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers.") - @header - ETag?: string; - - @doc("The time at which the resource was last modified.") - @header - @format("date-time-rfc1123") - "Last-Modified"?: string; -} - -@doc("Client related response header values") -model BatchClientResponseHeaders { - @doc("The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true.") - @header - @format("uuid") - "client-request-id"?: string; - - @doc("A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in.") - @header - @format("uuid") - "request-id"?: string; -} - -@doc("The Pool does not exist.") -model PoolDoesntExistResponseHeaders { - @doc("A process exit code.") - @statusCode - code: "404"; -} - -@doc("The parameters for a widget status request") -model DeleteResponseHeaders { - @doc("A process exit code.") - @statusCode - code: "202"; - - @doc("The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true.") - @header - "client-request-id"?: string; - - @doc("A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in.") - @header - "request-id"?: string; -} - -@doc("The parameters for a widget status request") -model BatchApplicationListHeaders extends BatchClientRequestHeaders { - @doc(""" -The maximum number of items to return in the response. A maximum of 1000 -applications can be returned. -""") - @query - @minValue(1) - @maxValue(1000) - maxresults?: int32 = 1000; - - @doc(""" -The time the request was issued. Client libraries typically set this to the -current system clock time; set it explicitly if you are calling the REST API -directly. 
-""") - @header - @format("date-time-rfc1123") - "ocp-date"?: string; -} - -@doc("Common header parms for Pool related File operartions") -model BatchPoolFileClientRequestHeaders extends BatchClientRequestHeaders { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node from which you want to delete the file.") - @path - nodeId: string; - - @doc("The path to the file or directory that you want to delete.") - @path - filePath: string; -} - -@doc("Common header parms for Job related File operartions") -model BatchJobFileClientRequestHeaders extends BatchClientRequestHeaders { - @doc("The ID of the Job that contains the Task.") - @path - jobId: string; - - @doc("The ID of the Task whose file you want to retrieve.") - @path - taskId: string; - - @doc("The path to the Task file that you want to get the content of.") - @path - filePath: string; -} - -@doc("The parameters for a widget status request") -model BatchClientRequestHeaders { - @doc(""" -The maximum number of items to return in the response. A maximum of 1000 -applications can be returned. -""") - @query - timeOut?: int32 = 30; - - @doc(""" -The caller-generated request identity, in the form of a GUID with no decoration -such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. -""") - @header - @format("uuid") - "client-request-id"?: string; - - @doc("Whether the server should return the client-request-id in the response.") - @header - "return-client-request-id"?: boolean = false; - - @doc(""" -The time the request was issued. Client libraries typically set this to the -current system clock time; set it explicitly if you are calling the REST API -directly. -""") - @header - @format("date-time-rfc1123") - "ocp-date"?: string; -} - -@doc("The parameters for a widget status request") -model Pool_ListUsageMetricRequestHeaders { - @doc(""" -The earliest time from which to include metrics. This must be at least two and -a half hours before the current time. If not specified this defaults to the -start time of the last aggregation interval currently available. -""") - @query - starttime?: utcDateTime; - - @doc(""" -The latest time from which to include metrics. This must be at least two hours -before the current time. If not specified this defaults to the end time of the -last aggregation interval currently available. -""") - @query - endtime?: utcDateTime; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. -""") - @query - $filter?: string; -} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/README.md b/packages/typespec-test/test/batch_modular/generated/typespec-ts/README.md index 37d9006e11..27abc762cf 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/README.md +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/README.md @@ -1,6 +1,6 @@ -# Azure BatchService REST client library for JavaScript +# Azure Batch REST client library for JavaScript -A client for issuing REST requests to the Azure Batch service. +Azure Batch provides Cloud-scale job scheduling and compute management. 
**Please rely heavily on our [REST client docs](https://github.com/Azure/azure-sdk-for-js/blob/main/documentation/rest-clients.md) to use this library** @@ -21,13 +21,13 @@ Key links: ### Install the `@azure-rest/batch` package -Install the Azure BatchService REST client REST client library for JavaScript with `npm`: +Install the Azure Batch REST client REST client library for JavaScript with `npm`: ```bash npm install @azure-rest/batch ``` -### Create and authenticate a `BatchServiceClient` +### Create and authenticate a `BatchClient` To use an [Azure Active Directory (AAD) token credential](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/identity/identity/samples/AzureIdentityExamples.md#authenticating-with-a-pre-fetched-access-token), provide an instance of the desired credential type obtained from the diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/package-files/package.json b/packages/typespec-test/test/batch_modular/generated/typespec-ts/package-files/package.json deleted file mode 100644 index 78b110f9bb..0000000000 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/package-files/package.json +++ /dev/null @@ -1,133 +0,0 @@ -{ - "name": "@azure-rest/batch", - "version": "1.0.0-beta.1", - "description": "\"Batch Service\"", - "sdk-type": "client", - "main": "dist/index.js", - "module": "dist-esm/src/index.js", - "react-native": { "./dist/index.js": "./dist-esm/src/index.js" }, - "type": "module", - "exports": { - ".": { - "types": "./types/src/index.d.ts", - "require": "./dist/index.cjs", - "import": "./dist-esm/src/index.js" - }, - "./api": { - "types": "./types/src/api/index.d.ts", - "import": "./dist-esm/src/api/index.js" - }, - "./rest": { - "types": "./types/src/rest/index.d.ts", - "import": "./dist-esm/src/rest/index.js" - } - }, - "//metadata": { - "constantPaths": [ - { "path": "src/utils/constants.ts", "prefix": "SDK_VERSION" } - ] - }, - "types": "types/latest/batch.d.ts", - "typesVersions": { "<3.6": { "*": ["types/3.1/batch.d.ts"] } }, - "scripts": { - "audit": "node ../../../common/scripts/rush-audit.js && rimraf node_modules package-lock.json && npm i --package-lock-only 2>&1 && npm audit", - "build:samples": "echo Obsolete", - "build:test:browser": "tsc -p . && rollup -c rollup.test.config.js 2>&1", - "build:test:node": "tsc -p . && dev-tool run bundle", - "build:test": "tsc -p . && rollup -c rollup.test.config.js 2>&1", - "build:types": "downlevel-dts types/latest/ types/3.1/", - "build:output": "node scripts/renameOutput.mjs", - "build": "npm run build:test && api-extractor run --local && npm run build:types && npm run build:output", - "check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"test/**/*.ts\" \"*.{js,json}\"", - "clean": "rimraf dist dist-* temp types *.tgz *.log", - "execute:samples": "dev-tool samples run samples-dev", - "extract-api": "tsc -p . 
&& api-extractor run --local", - "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"test/**/*.ts\" \"samples-dev/**/*.ts\" \"*.{js,json}\"", - "integration-test:browser": "echo skipped", - "integration-test:node": "nyc mocha --timeout 600000 \"test/internal/**/*.spec.ts\" \"test/public/**/*.spec.ts\"", - "integration-test": "npm run integration-test:node && npm run integration-test:browser", - "lint:fix": "eslint README.md package.json api-extractor.json src test --ext .ts,.javascript,.js --fix --fix-type [problem,suggestion]", - "lint": "eslint README.md package.json api-extractor.json src test --ext .ts,.javascript,.js", - "pack": "npm pack 2>&1", - "test:browser": "npm run clean && npm run build:test && npm run integration-test:browser", - "test:node": "npm run clean && tsc -p . && npm run integration-test:node", - "test": "npm run clean && tsc -p . && npm run unit-test:node && dev-tool run bundle && npm run unit-test:browser && npm run integration-test", - "unit-test:browser": "karma start karma.conf.cjs --single-run", - "unit-test:node": "mocha --exclude \"test/**/browser/*.spec.ts\" \"test/internal/unit/{,!(browser)/**/}*.spec.ts\" \"test/public/unit/{,!(browser)/**/}*.spec.ts\"", - "unit-test": "npm run unit-test:node && npm run unit-test:browser" - }, - "files": [ - "dist/", - "dist-esm/src/", - "types", - "types/src", - "types/latest/", - "types/3.1/", - "README.md", - "LICENSE" - ], - "repository": "github:Azure/azure-sdk-for-js", - "engines": { "node": ">=18.0.0" }, - "keywords": ["azure", "cloud", "typescript"], - "author": "Microsoft Corporation", - "license": "MIT", - "bugs": { "url": "https://github.com/Azure/azure-sdk-for-js/issues" }, - "homepage": "https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/batch/batch/README.md", - "sideEffects": false, - "prettier": "@azure/eslint-plugin-azure-sdk/prettier.json", - "devDependencies": { - "@azure/dev-tool": "^1.0.0", - "@azure/eslint-plugin-azure-sdk": "^3.0.0", - "@azure-tools/test-recorder": "^3.0.0", - "@azure/test-utils": "^1.0.0", - "@microsoft/api-extractor": "^7.31.1", - "@rollup/plugin-commonjs": "^24.0.0", - "@rollup/plugin-inject": "^5.0.0", - "@rollup/plugin-json": "^6.0.0", - "@rollup/plugin-multi-entry": "^6.0.0", - "@rollup/plugin-node-resolve": "^13.1.3", - "@rollup/plugin-replace": "^5.0.0", - "@rollup/plugin-typescript": "^11.0.0", - "@types/chai": "^4.3.1", - "@types/mocha": "^10.0.0", - "@types/node": "^18.0.0", - "chai": "^4.3.6", - "cross-env": "^7.0.3", - "dotenv": "^16.0.0", - "downlevel-dts": "^0.10.0", - "eslint": "^8.16.0", - "esm": "^3.2.25", - "karma": "^6.4.0", - "karma-chrome-launcher": "^3.1.1", - "karma-coverage": "^2.2.0", - "karma-edge-launcher": "^0.4.2", - "karma-env-preprocessor": "^0.1.1", - "karma-firefox-launcher": "^2.1.2", - "karma-json-preprocessor": "^0.3.3", - "karma-json-to-file-reporter": "^1.0.1", - "karma-junit-reporter": "^2.0.1", - "karma-mocha": "^2.0.1", - "karma-mocha-reporter": "^2.2.5", - "karma-sourcemap-loader": "^0.3.8", - "mocha": "^10.0.0", - "mocha-junit-reporter": "^2.0.2", - "nyc": "^15.1.0", - "prettier": "^2.5.1", - "puppeteer": "^19.2.2", - "rimraf": "^5.0.0", - "rollup": "^2.0.0", - "rollup-plugin-shim": "^1.0.0", - "rollup-plugin-sourcemaps": "^0.6.3", - "ts-node": "^10.8.1", - "typescript": "~5.2.0", - "util": "^0.12.4" - }, - "dependencies": { - "@azure-rest/core-client": "^1.1.2", - "@azure/core-auth": "^1.4.1", - "@azure/core-rest-pipeline": "^1.8.1", - "@azure/core-util": "^1.1.0", - 
"@azure/logger": "^1.0.3", - "tslib": "^2.4.0" - } -} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/package.json b/packages/typespec-test/test/batch_modular/generated/typespec-ts/package.json index 3821c13964..f0bce94891 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/package.json +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/package.json @@ -6,9 +6,25 @@ "description": "Batch Service", "keywords": ["node", "azure", "cloud", "typescript", "browser", "isomorphic"], "license": "MIT", + "type": "module", "main": "dist/index.js", "module": "./dist-esm/src/index.js", "types": "./types/batch.d.ts", + "exports": { + ".": { + "types": "./types/src/index.d.ts", + "require": "./dist/index.cjs", + "import": "./dist-esm/src/index.js" + }, + "./api": { + "types": "./types/src/api/index.d.ts", + "import": "./dist-esm/src/api/index.js" + }, + "./models": { + "types": "./types/src/models/index.d.ts", + "import": "./dist-esm/src/models/index.js" + } + }, "repository": "github:Azure/azure-sdk-for-js", "bugs": { "url": "https://github.com/Azure/azure-sdk-for-js/issues" }, "files": [ @@ -28,13 +44,13 @@ "build:test": "tsc -p . && rollup -c 2>&1", "build:debug": "echo skipped.", "check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"*.{js,json}\" \"test/**/*.ts\"", - "clean": "rimraf dist dist-browser dist-esm test-dist temp types *.tgz *.log", + "clean": "rimraf --glob dist dist-browser dist-esm test-dist temp types *.tgz *.log", "execute:samples": "echo skipped", "extract-api": "rimraf review && mkdirp ./review && api-extractor run --local", "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"*.{js,json}\" \"test/**/*.ts\"", "generate:client": "echo skipped", "integration-test:browser": "karma start --single-run", - "integration-test:node": "nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 5000000 --full-trace \"dist-esm/test/{,!(browser)/**/}*.spec.js\"", + "integration-test:node": "nyc mocha --require source-map-support/register.js --timeout 5000000 --full-trace \"dist-esm/test/{,!(browser)/**/}*.spec.js\"", "integration-test": "npm run integration-test:node && npm run integration-test:browser", "lint:fix": "eslint package.json api-extractor.json src test --ext .ts --fix --fix-type [problem,suggestion]", "lint": "eslint package.json api-extractor.json src test --ext .ts", @@ -43,7 +59,7 @@ "test:node": "npm run clean && npm run build:test && npm run unit-test:node", "test": "npm run clean && npm run build:test && npm run unit-test", "unit-test": "npm run unit-test:node && npm run unit-test:browser", - "unit-test:node": "mocha -r esm --require ts-node/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"test/{,!(browser)/**/}*.spec.ts\"", + "unit-test:node": "mocha --full-trace \"test/{,!(browser)/**/}*.spec.ts\"", "unit-test:browser": "karma start --single-run", "build": "npm run clean && tsc && rollup -c 2>&1 && npm run minify && mkdirp ./review && npm run extract-api", "minify": "uglifyjs -c -m --comments --source-map \"content='./dist/index.js.map'\" -o ./dist/index.min.js ./dist/index.js" @@ -52,11 +68,12 @@ "autoPublish": false, "dependencies": { "@azure/core-auth": "^1.3.0", - "@azure-rest/core-client": "^1.1.3", - "@azure/core-rest-pipeline": "^1.8.0", + 
"@azure-rest/core-client": "^1.1.4", + "@azure/core-rest-pipeline": "^1.12.0", "@azure/logger": "^1.0.0", "tslib": "^2.2.0", - "@azure/core-paging": "^1.5.0" + "@azure/core-paging": "^1.5.0", + "@azure/core-util": "^1.4.0" }, "devDependencies": { "@microsoft/api-extractor": "^7.31.1", @@ -95,9 +112,15 @@ "karma-source-map-support": "~1.4.0", "karma-sourcemap-loader": "^0.4.0", "karma": "^6.2.0", - "c8": "^8.0.0" + "c8": "^8.0.0", + "ts-node": "^10.0.0" }, "browser": { "./dist-esm/test/public/utils/env.js": "./dist-esm/test/public/utils/env.browser.js" + }, + "mocha": { + "extension": ["ts"], + "timeout": "1200000", + "loader": "ts-node/esm" } } diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/review/batch.api.md b/packages/typespec-test/test/batch_modular/generated/typespec-ts/review/batch.api.md new file mode 100644 index 0000000000..c9b29c3c49 --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/review/batch.api.md @@ -0,0 +1,2237 @@ +## API Report File for "@azure-rest/batch" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). + +```ts + +import { ClientOptions } from '@azure-rest/core-client'; +import { OperationOptions } from '@azure-rest/core-client'; +import { Pipeline } from '@azure/core-rest-pipeline'; +import { TokenCredential } from '@azure/core-auth'; + +// @public +export type AccessScope = string; + +// @public +export interface AccountListSupportedImagesResult { + "odata.nextLink"?: string; + value?: ImageInformation[]; +} + +// @public +export interface AffinityInformation { + affinityId: string; +} + +// @public +export type AllocationState = string; + +// @public +export interface ApplicationListResult { + "odata.nextLink"?: string; + value?: BatchApplication[]; +} + +// @public +export interface ApplicationPackageReference { + applicationId: string; + version?: string; +} + +// @public +export interface AuthenticationTokenSettings { + access?: AccessScope[]; +} + +// @public +export interface AutoPoolSpecification { + autoPoolIdPrefix?: string; + keepAlive?: boolean; + pool?: PoolSpecification; + poolLifetimeOption: PoolLifetimeOption; +} + +// @public +export interface AutoScaleRun { + error?: AutoScaleRunError; + results?: string; + timestamp: Date; +} + +// @public +export interface AutoScaleRunError { + code?: string; + message?: string; + values?: NameValuePair[]; +} + +// @public +export type AutoUserScope = string; + +// @public +export interface AutoUserSpecification { + elevationLevel?: ElevationLevel; + scope?: AutoUserScope; +} + +// @public +export interface AzureBlobFileSystemConfiguration { + accountKey?: string; + accountName: string; + blobfuseOptions?: string; + containerName: string; + identityReference?: BatchNodeIdentityReference; + relativeMountPath: string; + sasKey?: string; +} + +// @public +export interface AzureFileShareConfiguration { + accountKey: string; + accountName: string; + azureFileUrl: string; + mountOptions?: string; + relativeMountPath: string; +} + +// @public +export interface BatchApplication { + displayName: string; + id: string; + versions: string[]; +} + +// @public +export interface BatchCertificate { + certificateFormat?: CertificateFormat; + data: Uint8Array; + readonly deleteCertificateError?: DeleteCertificateError; + password?: string; + readonly previousState?: CertificateState; + readonly previousStateTransitionTime?: Date; + readonly publicData?: Uint8Array; + readonly state?: CertificateState; + readonly 
stateTransitionTime?: Date; + thumbprint: string; + thumbprintAlgorithm: string; + readonly url?: string; +} + +// @public (undocumented) +export class BatchClient { + constructor(endpoint: string, credential: TokenCredential, options?: BatchClientOptions); + cancelCertificateDeletion(thumbprintAlgorithm: string, thumbprint: string, options?: CancelCertificateDeletionOptions): Promise; + createCertificate(body: BatchCertificate, options?: CreateCertificateOptions): Promise; + createJob(body: BatchJobCreateOptions, options?: CreateJobOptions): Promise; + createJobSchedule(body: BatchJobScheduleCreateOptions, options?: CreateJobScheduleOptions): Promise; + createNodeUser(poolId: string, nodeId: string, body: BatchNodeUserCreateOptions, options?: CreateNodeUserOptions): Promise; + createPool(body: BatchPoolCreateOptions, options?: CreatePoolOptions): Promise; + createTask(jobId: string, body: BatchTaskCreateOptions, options?: CreateTaskOptions): Promise; + createTaskCollection(jobId: string, collection: BatchTaskCollection, options?: CreateTaskCollectionOptions): Promise; + deleteCertificate(thumbprintAlgorithm: string, thumbprint: string, options?: DeleteCertificateOptions): Promise; + deleteJob(jobId: string, options?: DeleteJobOptions): Promise; + deleteJobSchedule(jobScheduleId: string, options?: DeleteJobScheduleOptions): Promise; + deleteNodeFile(poolId: string, nodeId: string, filePath: string, options?: DeleteNodeFileOptions): Promise; + deleteNodeUser(poolId: string, nodeId: string, userName: string, options?: DeleteNodeUserOptions): Promise; + deletePool(poolId: string, options?: DeletePoolOptions): Promise; + deleteTask(jobId: string, taskId: string, options?: DeleteTaskOptions): Promise; + deleteTaskFile(jobId: string, taskId: string, filePath: string, options?: DeleteTaskFileOptions): Promise; + disableJob(jobId: string, body: BatchJobDisableOptions, options?: DisableJobOptions): Promise; + disableJobSchedule(jobScheduleId: string, options?: DisableJobScheduleOptions): Promise; + disableNodeScheduling(poolId: string, nodeId: string, body: NodeDisableSchedulingOptions, options?: DisableNodeSchedulingOptions): Promise; + disablePoolAutoScale(poolId: string, options?: DisablePoolAutoScaleOptions): Promise; + enableJob(jobId: string, options?: EnableJobOptions): Promise; + enableJobSchedule(jobScheduleId: string, options?: EnableJobScheduleOptions): Promise; + enableNodeScheduling(poolId: string, nodeId: string, options?: EnableNodeSchedulingOptions): Promise; + enablePoolAutoScale(poolId: string, body: BatchPoolEnableAutoScaleOptions, options?: EnablePoolAutoScaleOptions): Promise; + evaluatePoolAutoScale(poolId: string, body: BatchPoolEvaluateAutoScaleOptions, options?: EvaluatePoolAutoScaleOptions): Promise; + getApplication(applicationId: string, options?: GetApplicationOptions): Promise; + getCertificate(thumbprintAlgorithm: string, thumbprint: string, options?: GetCertificateOptions): Promise; + getJob(jobId: string, options?: GetJobOptions): Promise; + getJobSchedule(jobScheduleId: string, options?: GetJobScheduleOptions): Promise; + getJobTaskCounts(jobId: string, options?: GetJobTaskCountsOptions): Promise; + getNode(poolId: string, nodeId: string, options?: GetNodeOptions): Promise; + getNodeExtension(poolId: string, nodeId: string, extensionName: string, options?: GetNodeExtensionOptions): Promise; + getNodeFile(poolId: string, nodeId: string, filePath: string, options?: GetNodeFileOptions): Promise; + getNodeFileProperties(poolId: string, nodeId: string, filePath: string, 
options?: GetNodeFilePropertiesOptions): Promise; + getNodeRemoteDesktopFile(poolId: string, nodeId: string, options?: GetNodeRemoteDesktopFileOptions): Promise; + getNodeRemoteLoginSettings(poolId: string, nodeId: string, options?: GetNodeRemoteLoginSettingsOptions): Promise; + getPool(poolId: string, options?: GetPoolOptions): Promise; + getTask(jobId: string, taskId: string, options?: GetTaskOptions): Promise; + getTaskFile(jobId: string, taskId: string, filePath: string, options?: GetTaskFileOptions): Promise; + getTaskFileProperties(jobId: string, taskId: string, filePath: string, options?: GetTaskFilePropertiesOptions): Promise; + jobScheduleExists(jobScheduleId: string, options?: JobScheduleExistsOptions): Promise; + listApplications(options?: ListApplicationsOptions): Promise; + listCertificates(options?: ListCertificatesOptions): Promise; + listJobPreparationAndReleaseTaskStatus(jobId: string, options?: ListJobPreparationAndReleaseTaskStatusOptions): Promise; + listJobs(options?: ListJobsOptions): Promise; + listJobSchedules(options?: ListJobSchedulesOptions): Promise; + listJobsFromSchedule(jobScheduleId: string, options?: ListJobsFromScheduleOptions): Promise; + listNodeExtensions(poolId: string, nodeId: string, options?: ListNodeExtensionsOptions): Promise; + listNodeFiles(poolId: string, nodeId: string, options?: ListNodeFilesOptions): Promise; + listNodes(poolId: string, options?: ListNodesOptions): Promise; + listPoolNodeCounts(options?: ListPoolNodeCountsOptions): Promise; + listPools(options?: ListPoolsOptions): Promise; + listPoolUsageMetrics(options?: ListPoolUsageMetricsOptions): Promise; + listSubTasks(jobId: string, taskId: string, options?: ListSubTasksOptions): Promise; + listSupportedImages(options?: ListSupportedImagesOptions): Promise; + listTaskFiles(jobId: string, taskId: string, options?: ListTaskFilesOptions): Promise; + listTasks(jobId: string, options?: ListTasksOptions): Promise; + readonly pipeline: Pipeline; + poolExists(poolId: string, options?: PoolExistsOptions): Promise; + reactivateTask(jobId: string, taskId: string, options?: ReactivateTaskOptions): Promise; + rebootNode(poolId: string, nodeId: string, body: NodeRebootOptions, options?: RebootNodeOptions): Promise; + reimageNode(poolId: string, nodeId: string, body: NodeReimageOptions, options?: ReimageNodeOptions): Promise; + removeNodes(poolId: string, body: NodeRemoveOptions, options?: RemoveNodesOptions): Promise; + replaceJob(jobId: string, body: BatchJob, options?: ReplaceJobOptions): Promise; + replaceJobSchedule(jobScheduleId: string, body: BatchJobSchedule, options?: ReplaceJobScheduleOptions): Promise; + replaceNodeUser(poolId: string, nodeId: string, userName: string, body: BatchNodeUserUpdateOptions, options?: ReplaceNodeUserOptions): Promise; + replacePoolProperties(poolId: string, body: BatchPoolReplaceOptions, options?: ReplacePoolPropertiesOptions): Promise; + replaceTask(jobId: string, taskId: string, body: BatchTask, options?: ReplaceTaskOptions): Promise; + resizePool(poolId: string, body: BatchPoolResizeOptions, options?: ResizePoolOptions): Promise; + stopPoolResize(poolId: string, options?: StopPoolResizeOptions): Promise; + terminateJob(jobId: string, body: BatchJobTerminateOptions, options?: TerminateJobOptions): Promise; + terminateJobSchedule(jobScheduleId: string, options?: TerminateJobScheduleOptions): Promise; + terminateTask(jobId: string, taskId: string, options?: TerminateTaskOptions): Promise; + updateJob(jobId: string, body: BatchJobUpdateOptions, options?: 
UpdateJobOptions): Promise; + updateJobSchedule(jobScheduleId: string, body: BatchJobScheduleUpdateOptions, options?: UpdateJobScheduleOptions): Promise; + updatePool(poolId: string, body: BatchPoolUpdateOptions, options?: UpdatePoolOptions): Promise; + uploadNodeLogs(poolId: string, nodeId: string, body: UploadBatchServiceLogsOptions, options?: UploadNodeLogsOptions): Promise; +} + +// @public (undocumented) +export interface BatchClientOptions extends ClientOptions { +} + +// @public +export interface BatchError { + code: string; + message?: ErrorMessage; + values?: BatchErrorDetail[]; +} + +// @public +export interface BatchErrorDetail { + key?: string; + value?: string; +} + +// @public +export interface BatchJob { + allowTaskPreemption?: boolean; + readonly commonEnvironmentSettings?: EnvironmentSetting[]; + constraints?: JobConstraints; + readonly creationTime?: Date; + readonly displayName?: string; + readonly eTag?: string; + readonly executionInfo?: JobExecutionInformation; + readonly id?: string; + readonly jobManagerTask?: JobManagerTask; + readonly jobPreparationTask?: JobPreparationTask; + readonly jobReleaseTask?: JobReleaseTask; + readonly lastModified?: Date; + maxParallelTasks?: number; + metadata?: MetadataItem[]; + readonly networkConfiguration?: JobNetworkConfiguration; + onAllTasksComplete?: OnAllTasksComplete; + readonly onTaskFailure?: OnTaskFailure; + poolInfo: PoolInformation; + readonly previousState?: JobState; + readonly previousStateTransitionTime?: Date; + priority?: number; + readonly state?: JobState; + readonly stateTransitionTime?: Date; + readonly stats?: JobStatistics; + readonly url?: string; + readonly usesTaskDependencies?: boolean; +} + +// @public +export interface BatchJobCreateOptions { + allowTaskPreemption?: boolean; + commonEnvironmentSettings?: EnvironmentSetting[]; + constraints?: JobConstraints; + displayName?: string; + id: string; + jobManagerTask?: JobManagerTask; + jobPreparationTask?: JobPreparationTask; + jobReleaseTask?: JobReleaseTask; + maxParallelTasks?: number; + metadata?: MetadataItem[]; + networkConfiguration?: JobNetworkConfiguration; + onAllTasksComplete?: OnAllTasksComplete; + onTaskFailure?: OnTaskFailure; + poolInfo: PoolInformation; + priority?: number; + usesTaskDependencies?: boolean; +} + +// @public +export interface BatchJobDisableOptions { + disableTasks: DisableJobOption; +} + +// @public +export interface BatchJobListPreparationAndReleaseTaskStatusResult { + "odata.nextLink"?: string; + value?: JobPreparationAndReleaseTaskExecutionInformation[]; +} + +// @public +export interface BatchJobListResult { + "odata.nextLink"?: string; + value?: BatchJob[]; +} + +// @public +export interface BatchJobSchedule { + readonly creationTime?: Date; + readonly displayName?: string; + readonly eTag?: string; + readonly executionInfo?: JobScheduleExecutionInformation; + readonly id?: string; + jobSpecification: JobSpecification; + readonly lastModified?: Date; + metadata?: MetadataItem[]; + readonly previousState?: JobScheduleState; + readonly previousStateTransitionTime?: Date; + schedule: Schedule; + readonly state?: JobScheduleState; + readonly stateTransitionTime?: Date; + readonly stats?: JobScheduleStatistics; + readonly url?: string; +} + +// @public +export interface BatchJobScheduleCreateOptions { + displayName?: string; + id: string; + jobSpecification: JobSpecification; + metadata?: MetadataItem[]; + schedule: Schedule; +} + +// @public +export interface BatchJobScheduleListResult { + "odata.nextLink"?: string; + 
value?: BatchJobSchedule[]; +} + +// @public +export interface BatchJobScheduleUpdateOptions { + jobSpecification?: JobSpecification; + metadata?: MetadataItem[]; + schedule?: Schedule; +} + +// @public +export interface BatchJobTerminateOptions { + terminateReason?: string; +} + +// @public +export interface BatchJobUpdateOptions { + allowTaskPreemption?: boolean; + constraints?: JobConstraints; + maxParallelTasks?: number; + metadata?: MetadataItem[]; + onAllTasksComplete?: OnAllTasksComplete; + poolInfo?: PoolInformation; + priority?: number; +} + +// @public +export interface BatchNode { + affinityId?: string; + allocationTime?: Date; + certificateReferences?: CertificateReference[]; + endpointConfiguration?: BatchNodeEndpointConfiguration; + errors?: BatchNodeError[]; + id?: string; + ipAddress?: string; + isDedicated?: boolean; + lastBootTime?: Date; + nodeAgentInfo?: NodeAgentInformation; + recentTasks?: TaskInformation[]; + runningTasksCount?: number; + runningTaskSlotsCount?: number; + schedulingState?: SchedulingState; + startTask?: StartTask; + startTaskInfo?: StartTaskInformation; + state?: BatchNodeState; + stateTransitionTime?: Date; + totalTasksRun?: number; + totalTasksSucceeded?: number; + url?: string; + virtualMachineInfo?: VirtualMachineInfo; + vmSize?: string; +} + +// @public +export type BatchNodeDeallocationOption = string; + +// @public +export interface BatchNodeEndpointConfiguration { + inboundEndpoints: InboundEndpoint[]; +} + +// @public +export interface BatchNodeError { + code?: string; + errorDetails?: NameValuePair[]; + message?: string; +} + +// @public +export type BatchNodeFillType = string; + +// @public +export interface BatchNodeIdentityReference { + resourceId?: string; +} + +// @public +export interface BatchNodeInformation { + affinityId?: string; + nodeId?: string; + nodeUrl?: string; + poolId?: string; + taskRootDirectory?: string; + taskRootDirectoryUrl?: string; +} + +// @public +export interface BatchNodeListResult { + "odata.nextLink"?: string; + value?: BatchNode[]; +} + +// @public +export type BatchNodeRebootOption = string; + +// @public +export type BatchNodeReimageOption = string; + +// @public +export interface BatchNodeRemoteLoginSettingsResult { + remoteLoginIPAddress: string; + remoteLoginPort: number; +} + +// @public +export type BatchNodeState = string; + +// @public +export interface BatchNodeUserCreateOptions { + expiryTime?: Date; + isAdmin?: boolean; + name: string; + password?: string; + sshPublicKey?: string; +} + +// @public +export interface BatchNodeUserUpdateOptions { + expiryTime?: Date; + password?: string; + sshPublicKey?: string; +} + +// @public +export interface BatchPool { + readonly allocationState?: AllocationState; + readonly allocationStateTransitionTime?: Date; + readonly applicationLicenses?: string[]; + readonly applicationPackageReferences?: ApplicationPackageReference[]; + readonly autoScaleEvaluationInterval?: string; + readonly autoScaleFormula?: string; + readonly autoScaleRun?: AutoScaleRun; + readonly certificateReferences?: CertificateReference[]; + readonly cloudServiceConfiguration?: CloudServiceConfiguration; + readonly creationTime?: Date; + readonly currentDedicatedNodes?: number; + readonly currentLowPriorityNodes?: number; + readonly currentNodeCommunicationMode?: NodeCommunicationMode; + readonly displayName?: string; + readonly enableAutoScale?: boolean; + readonly enableInterNodeCommunication?: boolean; + readonly eTag?: string; + readonly id?: string; + readonly identity?: 
BatchPoolIdentity; + readonly lastModified?: Date; + readonly metadata?: MetadataItem[]; + readonly mountConfiguration?: MountConfiguration[]; + readonly networkConfiguration?: NetworkConfiguration; + readonly resizeErrors?: ResizeError[]; + readonly resizeTimeout?: string; + startTask?: StartTask; + readonly state?: PoolState; + readonly stateTransitionTime?: Date; + readonly stats?: PoolStatistics; + readonly targetDedicatedNodes?: number; + readonly targetLowPriorityNodes?: number; + targetNodeCommunicationMode?: NodeCommunicationMode; + readonly taskSchedulingPolicy?: TaskSchedulingPolicy; + readonly taskSlotsPerNode?: number; + readonly url?: string; + readonly userAccounts?: UserAccount[]; + readonly virtualMachineConfiguration?: VirtualMachineConfiguration; + readonly vmSize?: string; +} + +// @public +export interface BatchPoolCreateOptions { + applicationLicenses?: string[]; + applicationPackageReferences?: ApplicationPackageReference[]; + autoScaleEvaluationInterval?: string; + autoScaleFormula?: string; + certificateReferences?: CertificateReference[]; + cloudServiceConfiguration?: CloudServiceConfiguration; + displayName?: string; + enableAutoScale?: boolean; + enableInterNodeCommunication?: boolean; + id: string; + metadata?: MetadataItem[]; + mountConfiguration?: MountConfiguration[]; + networkConfiguration?: NetworkConfiguration; + resizeTimeout?: string; + startTask?: StartTask; + targetDedicatedNodes?: number; + targetLowPriorityNodes?: number; + targetNodeCommunicationMode?: NodeCommunicationMode; + taskSchedulingPolicy?: TaskSchedulingPolicy; + taskSlotsPerNode?: number; + userAccounts?: UserAccount[]; + virtualMachineConfiguration?: VirtualMachineConfiguration; + vmSize: string; +} + +// @public +export interface BatchPoolEnableAutoScaleOptions { + autoScaleEvaluationInterval?: string; + autoScaleFormula?: string; +} + +// @public +export interface BatchPoolEvaluateAutoScaleOptions { + autoScaleFormula: string; +} + +// @public +export interface BatchPoolIdentity { + type: PoolIdentityType; + userAssignedIdentities?: UserAssignedIdentity[]; +} + +// @public +export interface BatchPoolListResult { + "odata.nextLink"?: string; + value?: BatchPool[]; +} + +// @public +export interface BatchPoolReplaceOptions { + applicationPackageReferences: ApplicationPackageReference[]; + certificateReferences: CertificateReference[]; + metadata: MetadataItem[]; + startTask?: StartTask; + targetNodeCommunicationMode?: NodeCommunicationMode; +} + +// @public +export interface BatchPoolResizeOptions { + nodeDeallocationOption?: BatchNodeDeallocationOption; + resizeTimeout?: string; + targetDedicatedNodes?: number; + targetLowPriorityNodes?: number; +} + +// @public +export interface BatchPoolUpdateOptions { + applicationPackageReferences?: ApplicationPackageReference[]; + certificateReferences?: CertificateReference[]; + metadata?: MetadataItem[]; + startTask?: StartTask; + targetNodeCommunicationMode?: NodeCommunicationMode; +} + +// @public +export interface BatchTask { + readonly affinityInfo?: AffinityInformation; + readonly applicationPackageReferences?: ApplicationPackageReference[]; + readonly authenticationTokenSettings?: AuthenticationTokenSettings; + readonly commandLine?: string; + constraints?: TaskConstraints; + readonly containerSettings?: TaskContainerSettings; + readonly creationTime?: Date; + readonly dependsOn?: TaskDependencies; + readonly displayName?: string; + readonly environmentSettings?: EnvironmentSetting[]; + readonly eTag?: string; + readonly executionInfo?: 
TaskExecutionInformation; + readonly exitConditions?: ExitConditions; + readonly id?: string; + readonly lastModified?: Date; + readonly multiInstanceSettings?: MultiInstanceSettings; + readonly nodeInfo?: BatchNodeInformation; + readonly outputFiles?: OutputFile[]; + readonly previousState?: TaskState; + readonly previousStateTransitionTime?: Date; + readonly requiredSlots?: number; + readonly resourceFiles?: ResourceFile[]; + readonly state?: TaskState; + readonly stateTransitionTime?: Date; + readonly stats?: TaskStatistics; + readonly url?: string; + readonly userIdentity?: UserIdentity; +} + +// @public +export interface BatchTaskCollection { + value: BatchTaskCreateOptions[]; +} + +// @public +export interface BatchTaskCreateOptions { + affinityInfo?: AffinityInformation; + applicationPackageReferences?: ApplicationPackageReference[]; + authenticationTokenSettings?: AuthenticationTokenSettings; + commandLine: string; + constraints?: TaskConstraints; + containerSettings?: TaskContainerSettings; + dependsOn?: TaskDependencies; + displayName?: string; + environmentSettings?: EnvironmentSetting[]; + exitConditions?: ExitConditions; + id: string; + multiInstanceSettings?: MultiInstanceSettings; + outputFiles?: OutputFile[]; + requiredSlots?: number; + resourceFiles?: ResourceFile[]; + userIdentity?: UserIdentity; +} + +// @public +export interface BatchTaskListResult { + "odata.nextLink"?: string; + value?: BatchTask[]; +} + +// @public +export interface BatchTaskListSubtasksResult { + value?: SubtaskInformation[]; +} + +// @public +export type CachingType = string; + +// @public (undocumented) +export interface CancelCertificateDeletionOptions extends OperationOptions { + timeOut?: number; +} + +// @public +export type CertificateFormat = string; + +// @public +export interface CertificateListResult { + "odata.nextLink"?: string; + value?: BatchCertificate[]; +} + +// @public +export interface CertificateReference { + storeLocation?: CertificateStoreLocation; + storeName?: string; + thumbprint: string; + thumbprintAlgorithm: string; + visibility?: CertificateVisibility[]; +} + +// @public +export type CertificateState = string; + +// @public +export type CertificateStoreLocation = string; + +// @public +export type CertificateVisibility = string; + +// @public +export interface CifsMountConfiguration { + mountOptions?: string; + password: string; + relativeMountPath: string; + source: string; + username: string; +} + +// @public +export interface CloudServiceConfiguration { + osFamily: string; + osVersion?: string; +} + +// @public +export interface ContainerConfiguration { + containerImageNames?: string[]; + containerRegistries?: ContainerRegistry[]; + type: ContainerType; +} + +// @public +export interface ContainerRegistry { + identityReference?: BatchNodeIdentityReference; + password?: string; + registryServer?: string; + username?: string; +} + +// @public +export type ContainerType = string; + +// @public +export type ContainerWorkingDirectory = string; + +// @public (undocumented) +export interface CreateCertificateOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface CreateJobOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface CreateJobScheduleOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface CreateNodeUserOptions extends OperationOptions { + 
contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface CreatePoolOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface CreateTaskCollectionOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface CreateTaskOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public +export interface DataDisk { + caching?: CachingType; + diskSizeGB: number; + lun: number; + storageAccountType?: StorageAccountType; +} + +// @public +export interface DeleteCertificateError { + code?: string; + message?: string; + values?: NameValuePair[]; +} + +// @public (undocumented) +export interface DeleteCertificateOptions extends OperationOptions { + timeOut?: number; +} + +// @public (undocumented) +export interface DeleteJobOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface DeleteJobScheduleOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface DeleteNodeFileOptions extends OperationOptions { + recursive?: boolean; + timeOut?: number; +} + +// @public (undocumented) +export interface DeleteNodeUserOptions extends OperationOptions { + timeOut?: number; +} + +// @public (undocumented) +export interface DeletePoolOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface DeleteTaskFileOptions extends OperationOptions { + recursive?: boolean; + timeOut?: number; +} + +// @public (undocumented) +export interface DeleteTaskOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export type DependencyAction = string; + +// @public +export type DiffDiskPlacement = string; + +// @public +export interface DiffDiskSettings { + placement?: DiffDiskPlacement; +} + +// @public +export type DisableBatchNodeSchedulingOption = string; + +// @public +export type DisableJobOption = string; + +// @public (undocumented) +export interface DisableJobOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface DisableJobScheduleOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface DisableNodeSchedulingOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface DisablePoolAutoScaleOptions extends OperationOptions { + timeOut?: number; +} + +// @public +export interface DiskEncryptionConfiguration { + targets?: DiskEncryptionTarget[]; +} + +// @public +export type DiskEncryptionTarget = string; + +// @public +export type DynamicVNetAssignmentScope = string; + +// @public +export type ElevationLevel = string; + +// @public (undocumented) +export interface EnableJobOptions extends OperationOptions { + 
ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface EnableJobScheduleOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface EnableNodeSchedulingOptions extends OperationOptions { + timeOut?: number; +} + +// @public (undocumented) +export interface EnablePoolAutoScaleOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export interface EnvironmentSetting { + name: string; + value?: string; +} + +// @public +export type ErrorCategory = string; + +// @public +export interface ErrorMessage { + lang?: string; + value?: string; +} + +// @public (undocumented) +export interface EvaluatePoolAutoScaleOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public +export interface ExitCodeMapping { + code: number; + exitOptions: ExitOptions; +} + +// @public +export interface ExitCodeRangeMapping { + end: number; + exitOptions: ExitOptions; + start: number; +} + +// @public +export interface ExitConditions { + default?: ExitOptions; + exitCodeRanges?: ExitCodeRangeMapping[]; + exitCodes?: ExitCodeMapping[]; + fileUploadError?: ExitOptions; + preProcessingError?: ExitOptions; +} + +// @public +export interface ExitOptions { + dependencyAction?: DependencyAction; + jobAction?: JobAction; +} + +// @public +export interface FileProperties { + contentLength: number; + contentType?: string; + creationTime?: Date; + fileMode?: string; + lastModified: Date; +} + +// @public (undocumented) +export interface GetApplicationOptions extends OperationOptions { + timeOut?: number; +} + +// @public (undocumented) +export interface GetCertificateOptions extends OperationOptions { + $select?: string[]; + timeOut?: number; +} + +// @public (undocumented) +export interface GetJobOptions extends OperationOptions { + $expand?: string[]; + $select?: string[]; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface GetJobScheduleOptions extends OperationOptions { + $expand?: string[]; + $select?: string[]; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface GetJobTaskCountsOptions extends OperationOptions { + timeOut?: number; +} + +// @public (undocumented) +export interface GetNodeExtensionOptions extends OperationOptions { + $select?: string[]; + timeOut?: number; +} + +// @public (undocumented) +export interface GetNodeFileOptions extends OperationOptions { + ifModifiedSince?: Date; + ifUnmodifiedSince?: Date; + ocpRange?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface GetNodeFilePropertiesOptions extends OperationOptions { + ifModifiedSince?: Date; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface GetNodeOptions extends OperationOptions { + $select?: string[]; + timeOut?: number; +} + +// @public (undocumented) +export interface GetNodeRemoteDesktopFileOptions extends OperationOptions { + timeOut?: number; +} + +// @public (undocumented) +export interface 
GetNodeRemoteLoginSettingsOptions extends OperationOptions { + timeOut?: number; +} + +// @public (undocumented) +export interface GetPoolOptions extends OperationOptions { + $expand?: string[]; + $select?: string[]; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface GetTaskFileOptions extends OperationOptions { + ifModifiedSince?: Date; + ifUnmodifiedSince?: Date; + ocpRange?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface GetTaskFilePropertiesOptions extends OperationOptions { + ifModifiedSince?: Date; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface GetTaskOptions extends OperationOptions { + $expand?: string[]; + $select?: string[]; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export interface HttpHeader { + name: string; + value?: string; +} + +// @public +export interface ImageInformation { + batchSupportEndOfLife?: Date; + capabilities?: string[]; + imageReference: ImageReference; + nodeAgentSKUId: string; + osType: OSType; + verificationType: VerificationType; +} + +// @public +export interface ImageReference { + readonly exactVersion?: string; + offer?: string; + publisher?: string; + sku?: string; + version?: string; + virtualMachineImageId?: string; +} + +// @public +export interface InboundEndpoint { + backendPort: number; + frontendPort: number; + name: string; + protocol: InboundEndpointProtocol; + publicFQDN?: string; + publicIPAddress?: string; +} + +// @public +export type InboundEndpointProtocol = string; + +// @public +export interface InboundNATPool { + backendPort: number; + frontendPortRangeEnd: number; + frontendPortRangeStart: number; + name: string; + networkSecurityGroupRules?: NetworkSecurityGroupRule[]; + protocol: InboundEndpointProtocol; +} + +// @public +export interface InstanceViewStatus { + code?: string; + displayStatus?: string; + level?: StatusLevelTypes; + message?: string; + time?: string; +} + +// @public +export type IPAddressProvisioningType = string; + +// @public +export type JobAction = string; + +// @public +export interface JobConstraints { + maxTaskRetryCount?: number; + maxWallClockTime?: string; +} + +// @public +export interface JobExecutionInformation { + endTime?: Date; + poolId?: string; + schedulingError?: JobSchedulingError; + startTime: Date; + terminateReason?: string; +} + +// @public +export interface JobManagerTask { + allowLowPriorityNode?: boolean; + applicationPackageReferences?: ApplicationPackageReference[]; + authenticationTokenSettings?: AuthenticationTokenSettings; + commandLine: string; + constraints?: TaskConstraints; + containerSettings?: TaskContainerSettings; + displayName?: string; + environmentSettings?: EnvironmentSetting[]; + id: string; + killJobOnCompletion?: boolean; + outputFiles?: OutputFile[]; + requiredSlots?: number; + resourceFiles?: ResourceFile[]; + runExclusive?: boolean; + userIdentity?: UserIdentity; +} + +// @public +export interface JobNetworkConfiguration { + subnetId: string; +} + +// @public +export interface JobPreparationAndReleaseTaskExecutionInformation { + jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformation; + jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformation; + nodeId?: string; + nodeUrl?: string; + poolId?: string; +} + +// @public +export interface JobPreparationTask { 
+ commandLine: string; + constraints?: TaskConstraints; + containerSettings?: TaskContainerSettings; + environmentSettings?: EnvironmentSetting[]; + id?: string; + rerunOnNodeRebootAfterSuccess?: boolean; + resourceFiles?: ResourceFile[]; + userIdentity?: UserIdentity; + waitForSuccess?: boolean; +} + +// @public +export interface JobPreparationTaskExecutionInformation { + containerInfo?: TaskContainerExecutionInformation; + endTime?: Date; + exitCode?: number; + failureInfo?: TaskFailureInformation; + lastRetryTime?: Date; + result?: TaskExecutionResult; + retryCount: number; + startTime: Date; + state: JobPreparationTaskState; + taskRootDirectory?: string; + taskRootDirectoryUrl?: string; +} + +// @public +export type JobPreparationTaskState = string; + +// @public +export interface JobReleaseTask { + commandLine: string; + containerSettings?: TaskContainerSettings; + environmentSettings?: EnvironmentSetting[]; + id?: string; + maxWallClockTime?: string; + resourceFiles?: ResourceFile[]; + retentionTime?: string; + userIdentity?: UserIdentity; +} + +// @public +export interface JobReleaseTaskExecutionInformation { + containerInfo?: TaskContainerExecutionInformation; + endTime?: Date; + exitCode?: number; + failureInfo?: TaskFailureInformation; + result?: TaskExecutionResult; + startTime: Date; + state: JobReleaseTaskState; + taskRootDirectory?: string; + taskRootDirectoryUrl?: string; +} + +// @public +export type JobReleaseTaskState = string; + +// @public +export interface JobScheduleExecutionInformation { + endTime?: Date; + nextRunTime?: Date; + recentJob?: RecentJob; +} + +// @public (undocumented) +export interface JobScheduleExistsOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export type JobScheduleState = string; + +// @public +export interface JobScheduleStatistics { + kernelCPUTime: string; + lastUpdateTime: Date; + numFailedTasks: number; + numSucceededTasks: number; + numTaskRetries: number; + readIOGiB: number; + readIOps: number; + startTime: Date; + url: string; + userCPUTime: string; + waitTime: string; + wallClockTime: string; + writeIOGiB: number; + writeIOps: number; +} + +// @public +export interface JobSchedulingError { + category: ErrorCategory; + code?: string; + details?: NameValuePair[]; + message?: string; +} + +// @public +export interface JobSpecification { + allowTaskPreemption?: boolean; + commonEnvironmentSettings?: EnvironmentSetting[]; + constraints?: JobConstraints; + displayName?: string; + jobManagerTask?: JobManagerTask; + jobPreparationTask?: JobPreparationTask; + jobReleaseTask?: JobReleaseTask; + maxParallelTasks?: number; + metadata?: MetadataItem[]; + networkConfiguration?: JobNetworkConfiguration; + onAllTasksComplete?: OnAllTasksComplete; + onTaskFailure?: OnTaskFailure; + poolInfo: PoolInformation; + priority?: number; + usesTaskDependencies?: boolean; +} + +// @public +export type JobState = string; + +// @public +export interface JobStatistics { + kernelCPUTime: string; + lastUpdateTime: Date; + numFailedTasks: number; + numSucceededTasks: number; + numTaskRetries: number; + readIOGiB: number; + readIOps: number; + startTime: Date; + url: string; + userCPUTime: string; + waitTime: string; + wallClockTime: string; + writeIOGiB: number; + writeIOps: number; +} + +// @public +export interface LinuxUserConfiguration { + gid?: number; + sshPrivateKey?: string; + uid?: number; +} + +// @public (undocumented) +export 
interface ListApplicationsOptions extends OperationOptions { + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListCertificatesOptions extends OperationOptions { + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListJobPreparationAndReleaseTaskStatusOptions extends OperationOptions { + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListJobSchedulesOptions extends OperationOptions { + $expand?: string[]; + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListJobsFromScheduleOptions extends OperationOptions { + $expand?: string[]; + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListJobsOptions extends OperationOptions { + $expand?: string[]; + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListNodeExtensionsOptions extends OperationOptions { + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListNodeFilesOptions extends OperationOptions { + $filter?: string; + maxresults?: number; + recursive?: boolean; + timeOut?: number; +} + +// @public (undocumented) +export interface ListNodesOptions extends OperationOptions { + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListPoolNodeCountsOptions extends OperationOptions { + $filter?: string; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListPoolsOptions extends OperationOptions { + $expand?: string[]; + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListPoolUsageMetricsOptions extends OperationOptions { + $filter?: string; + endtime?: Date; + maxresults?: number; + starttime?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface ListSubTasksOptions extends OperationOptions { + $select?: string[]; + timeOut?: number; +} + +// @public (undocumented) +export interface ListSupportedImagesOptions extends OperationOptions { + $filter?: string; + maxresults?: number; + timeOut?: number; +} + +// @public (undocumented) +export interface ListTaskFilesOptions extends OperationOptions { + $filter?: string; + maxresults?: number; + recursive?: boolean; + timeOut?: number; +} + +// @public (undocumented) +export interface ListTasksOptions extends OperationOptions { + $expand?: string[]; + $filter?: string; + $select?: string[]; + maxresults?: number; + timeOut?: number; +} + +// @public +export type LoginMode = string; + +// @public +export interface MetadataItem { + name: string; + value: string; +} + +// @public +export interface MountConfiguration { + azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration; + azureFileShareConfiguration?: AzureFileShareConfiguration; + cifsMountConfiguration?: CifsMountConfiguration; + nfsMountConfiguration?: NfsMountConfiguration; +} + +// @public +export interface MultiInstanceSettings { + commonResourceFiles?: ResourceFile[]; + coordinationCommandLine: string; + numberOfInstances?: number; +} + +// @public +export interface NameValuePair { + name?: 
string; + value?: string; +} + +// @public +export interface NetworkConfiguration { + dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope; + enableAcceleratedNetworking?: boolean; + endpointConfiguration?: PoolEndpointConfiguration; + publicIPAddressConfiguration?: PublicIpAddressConfiguration; + subnetId?: string; +} + +// @public +export interface NetworkSecurityGroupRule { + access: NetworkSecurityGroupRuleAccess; + priority: number; + sourceAddressPrefix: string; + sourcePortRanges?: string[]; +} + +// @public +export type NetworkSecurityGroupRuleAccess = string; + +// @public +export interface NfsMountConfiguration { + mountOptions?: string; + relativeMountPath: string; + source: string; +} + +// @public +export interface NodeAgentInformation { + lastUpdateTime: Date; + version: string; +} + +// @public +export type NodeCommunicationMode = string; + +// @public +export interface NodeCounts { + creating: number; + idle: number; + leavingPool: number; + offline: number; + preempted: number; + rebooting: number; + reimaging: number; + running: number; + starting: number; + startTaskFailed: number; + total: number; + unknown: number; + unusable: number; + waitingForStartTask: number; +} + +// @public +export interface NodeDisableSchedulingOptions { + nodeDisableSchedulingOption?: DisableBatchNodeSchedulingOption; +} + +// @public +export interface NodeFile { + isDirectory?: boolean; + name?: string; + properties?: FileProperties; + url?: string; +} + +// @public +export interface NodeFileListResult { + "odata.nextLink"?: string; + value?: NodeFile[]; +} + +// @public +export interface NodePlacementConfiguration { + policy?: NodePlacementPolicyType; +} + +// @public +export type NodePlacementPolicyType = string; + +// @public +export interface NodeRebootOptions { + nodeRebootOption?: BatchNodeRebootOption; +} + +// @public +export interface NodeReimageOptions { + nodeReimageOption?: BatchNodeReimageOption; +} + +// @public +export interface NodeRemoveOptions { + nodeDeallocationOption?: BatchNodeDeallocationOption; + nodeList: string[]; + resizeTimeout?: string; +} + +// @public +export interface NodeVMExtension { + instanceView?: VMExtensionInstanceView; + provisioningState?: string; + vmExtension?: VMExtension; +} + +// @public +export interface NodeVMExtensionList { + "odata.nextLink"?: string; + value?: NodeVMExtension[]; +} + +// @public +export type OnAllTasksComplete = string; + +// @public +export type OnTaskFailure = string; + +// @public +export interface OSDisk { + ephemeralOSDiskSettings?: DiffDiskSettings; +} + +// @public +export type OSType = string; + +// @public +export interface OutputFile { + destination: OutputFileDestination; + filePattern: string; + uploadOptions: OutputFileUploadOptions; +} + +// @public +export interface OutputFileBlobContainerDestination { + containerUrl: string; + identityReference?: BatchNodeIdentityReference; + path?: string; + uploadHeaders?: HttpHeader[]; +} + +// @public +export interface OutputFileDestination { + container?: OutputFileBlobContainerDestination; +} + +// @public +export type OutputFileUploadCondition = string; + +// @public +export interface OutputFileUploadOptions { + uploadCondition: OutputFileUploadCondition; +} + +// @public +export interface PoolEndpointConfiguration { + inboundNATPools: InboundNATPool[]; +} + +// @public (undocumented) +export interface PoolExistsOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} 
+ +// @public +export type PoolIdentityType = string; + +// @public +export interface PoolInformation { + autoPoolSpecification?: AutoPoolSpecification; + poolId?: string; +} + +// @public +export type PoolLifetimeOption = string; + +// @public +export interface PoolListUsageMetricsResult { + "odata.nextLink"?: string; + value?: PoolUsageMetrics[]; +} + +// @public +export interface PoolNodeCounts { + dedicated?: NodeCounts; + lowPriority?: NodeCounts; + poolId: string; +} + +// @public +export interface PoolNodeCountsListResult { + "odata.nextLink"?: string; + value?: PoolNodeCounts[]; +} + +// @public +export interface PoolSpecification { + applicationLicenses?: string[]; + applicationPackageReferences?: ApplicationPackageReference[]; + autoScaleEvaluationInterval?: string; + autoScaleFormula?: string; + certificateReferences?: CertificateReference[]; + cloudServiceConfiguration?: CloudServiceConfiguration; + displayName?: string; + enableAutoScale?: boolean; + enableInterNodeCommunication?: boolean; + metadata?: MetadataItem[]; + mountConfiguration?: MountConfiguration[]; + networkConfiguration?: NetworkConfiguration; + resizeTimeout?: string; + startTask?: StartTask; + targetDedicatedNodes?: number; + targetLowPriorityNodes?: number; + targetNodeCommunicationMode?: NodeCommunicationMode; + taskSchedulingPolicy?: TaskSchedulingPolicy; + taskSlotsPerNode?: number; + userAccounts?: UserAccount[]; + virtualMachineConfiguration?: VirtualMachineConfiguration; + vmSize: string; +} + +// @public +export type PoolState = string; + +// @public +export interface PoolStatistics { + lastUpdateTime: Date; + resourceStats?: ResourceStatistics; + startTime: Date; + url: string; + usageStats?: UsageStatistics; +} + +// @public +export interface PoolUsageMetrics { + endTime: Date; + poolId: string; + startTime: Date; + totalCoreHours: number; + vmSize: string; +} + +// @public +export interface PublicIpAddressConfiguration { + ipAddressIds?: string[]; + provision?: IPAddressProvisioningType; +} + +// @public (undocumented) +export interface ReactivateTaskOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface RebootNodeOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public +export interface RecentJob { + id?: string; + url?: string; +} + +// @public (undocumented) +export interface ReimageNodeOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface RemoveNodesOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface ReplaceJobOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface ReplaceJobScheduleOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface ReplaceNodeUserOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface ReplacePoolPropertiesOptions extends OperationOptions 
{ + contentType?: string; + timeOut?: number; +} + +// @public (undocumented) +export interface ReplaceTaskOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export interface ResizeError { + code?: string; + message?: string; + values?: NameValuePair[]; +} + +// @public (undocumented) +export interface ResizePoolOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export interface ResourceFile { + autoStorageContainerName?: string; + blobPrefix?: string; + fileMode?: string; + filePath?: string; + httpUrl?: string; + identityReference?: BatchNodeIdentityReference; + storageContainerUrl?: string; +} + +// @public +export interface ResourceStatistics { + avgCPUPercentage: number; + avgDiskGiB: number; + avgMemoryGiB: number; + diskReadGiB: number; + diskReadIOps: number; + diskWriteGiB: number; + diskWriteIOps: number; + lastUpdateTime: Date; + networkReadGiB: number; + networkWriteGiB: number; + peakDiskGiB: number; + peakMemoryGiB: number; + startTime: Date; +} + +// @public +export interface Schedule { + doNotRunAfter?: Date; + doNotRunUntil?: Date; + recurrenceInterval?: string; + startWindow?: string; +} + +// @public +export type SchedulingState = string; + +// @public +export interface StartTask { + commandLine: string; + containerSettings?: TaskContainerSettings; + environmentSettings?: EnvironmentSetting[]; + maxTaskRetryCount?: number; + resourceFiles?: ResourceFile[]; + userIdentity?: UserIdentity; + waitForSuccess?: boolean; +} + +// @public +export interface StartTaskInformation { + containerInfo?: TaskContainerExecutionInformation; + endTime?: Date; + exitCode?: number; + failureInfo?: TaskFailureInformation; + lastRetryTime?: Date; + result?: TaskExecutionResult; + retryCount: number; + startTime: Date; + state: StartTaskState; +} + +// @public +export type StartTaskState = string; + +// @public +export type StatusLevelTypes = string; + +// @public (undocumented) +export interface StopPoolResizeOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export type StorageAccountType = string; + +// @public +export interface SubtaskInformation { + containerInfo?: TaskContainerExecutionInformation; + endTime?: Date; + exitCode?: number; + failureInfo?: TaskFailureInformation; + id?: number; + nodeInfo?: BatchNodeInformation; + previousState?: SubtaskState; + previousStateTransitionTime?: Date; + result?: TaskExecutionResult; + startTime?: Date; + state?: SubtaskState; + stateTransitionTime?: Date; +} + +// @public +export type SubtaskState = string; + +// @public +export interface TaskAddCollectionResult { + value?: TaskAddResult[]; +} + +// @public +export interface TaskAddResult { + error?: BatchError; + eTag?: string; + lastModified?: Date; + location?: string; + status: TaskAddStatus; + taskId: string; +} + +// @public +export type TaskAddStatus = string; + +// @public +export interface TaskConstraints { + maxTaskRetryCount?: number; + maxWallClockTime?: string; + retentionTime?: string; +} + +// @public +export interface TaskContainerExecutionInformation { + containerId?: string; + error?: string; + state?: string; +} + +// @public +export interface TaskContainerSettings { + 
containerRunOptions?: string; + imageName: string; + registry?: ContainerRegistry; + workingDirectory?: ContainerWorkingDirectory; +} + +// @public +export interface TaskCounts { + active: number; + completed: number; + failed: number; + running: number; + succeeded: number; +} + +// @public +export interface TaskCountsResult { + taskCounts: TaskCounts; + taskSlotCounts: TaskSlotCounts; +} + +// @public +export interface TaskDependencies { + taskIdRanges?: TaskIdRange[]; + taskIds?: string[]; +} + +// @public +export interface TaskExecutionInformation { + containerInfo?: TaskContainerExecutionInformation; + endTime?: Date; + exitCode?: number; + failureInfo?: TaskFailureInformation; + lastRequeueTime?: Date; + lastRetryTime?: Date; + requeueCount: number; + result?: TaskExecutionResult; + retryCount: number; + startTime?: Date; +} + +// @public +export type TaskExecutionResult = string; + +// @public +export interface TaskFailureInformation { + category: ErrorCategory; + code?: string; + details?: NameValuePair[]; + message?: string; +} + +// @public +export interface TaskIdRange { + end: number; + start: number; +} + +// @public +export interface TaskInformation { + executionInfo?: TaskExecutionInformation; + jobId?: string; + subtaskId?: number; + taskId?: string; + taskState: TaskState; + taskUrl?: string; +} + +// @public +export interface TaskSchedulingPolicy { + nodeFillType: BatchNodeFillType; +} + +// @public +export interface TaskSlotCounts { + active: number; + completed: number; + failed: number; + running: number; + succeeded: number; +} + +// @public +export type TaskState = string; + +// @public +export interface TaskStatistics { + kernelCPUTime: string; + lastUpdateTime: Date; + readIOGiB: number; + readIOps: number; + startTime: Date; + url: string; + userCPUTime: string; + waitTime: string; + wallClockTime: string; + writeIOGiB: number; + writeIOps: number; +} + +// @public (undocumented) +export interface TerminateJobOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface TerminateJobScheduleOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface TerminateTaskOptions extends OperationOptions { + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface UpdateJobOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface UpdateJobScheduleOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public (undocumented) +export interface UpdatePoolOptions extends OperationOptions { + contentType?: string; + ifMatch?: string; + ifModifiedSince?: Date; + ifNoneMatch?: string; + ifUnmodifiedSince?: Date; + timeOut?: number; +} + +// @public +export interface UploadBatchServiceLogsOptions { + containerUrl: string; + endTime?: Date; + identityReference?: BatchNodeIdentityReference; + startTime: Date; +} + +// @public +export interface UploadBatchServiceLogsResult { + 
numberOfFilesUploaded: number; + virtualDirectoryName: string; +} + +// @public (undocumented) +export interface UploadNodeLogsOptions extends OperationOptions { + contentType?: string; + timeOut?: number; +} + +// @public +export interface UsageStatistics { + dedicatedCoreTime: string; + lastUpdateTime: Date; + startTime: Date; +} + +// @public +export interface UserAccount { + elevationLevel?: ElevationLevel; + linuxUserConfiguration?: LinuxUserConfiguration; + name: string; + password: string; + windowsUserConfiguration?: WindowsUserConfiguration; +} + +// @public +export interface UserAssignedIdentity { + readonly clientId?: string; + readonly principalId?: string; + resourceId: string; +} + +// @public +export interface UserIdentity { + autoUser?: AutoUserSpecification; + username?: string; +} + +// @public +export type VerificationType = string; + +// @public +export interface VirtualMachineConfiguration { + containerConfiguration?: ContainerConfiguration; + dataDisks?: DataDisk[]; + diskEncryptionConfiguration?: DiskEncryptionConfiguration; + extensions?: VMExtension[]; + imageReference: ImageReference; + licenseType?: string; + nodeAgentSKUId: string; + nodePlacementConfiguration?: NodePlacementConfiguration; + osDisk?: OSDisk; + windowsConfiguration?: WindowsConfiguration; +} + +// @public +export interface VirtualMachineInfo { + imageReference?: ImageReference; +} + +// @public +export interface VMExtension { + autoUpgradeMinorVersion?: boolean; + enableAutomaticUpgrade?: boolean; + name: string; + protectedSettings?: Record; + provisionAfterExtensions?: string[]; + publisher: string; + settings?: Record; + type: string; + typeHandlerVersion?: string; +} + +// @public +export interface VMExtensionInstanceView { + name?: string; + statuses?: InstanceViewStatus[]; + subStatuses?: InstanceViewStatus[]; +} + +// @public +export interface WindowsConfiguration { + enableAutomaticUpdates?: boolean; +} + +// @public +export interface WindowsUserConfiguration { + loginMode?: LoginMode; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/BatchClient.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/BatchClient.ts new file mode 100644 index 0000000000..ff17ba9756 --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/BatchClient.ts @@ -0,0 +1,1193 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
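+
+// A minimal end-to-end usage sketch for the class below (an editorial
+// illustration, not generated output): assumes @azure/identity's
+// DefaultAzureCredential; the endpoint is a placeholder.
+//
+//   import { DefaultAzureCredential } from "@azure/identity";
+//   import { BatchClient } from "./BatchClient.js";
+//
+//   const client = new BatchClient(
+//     "https://<account>.<region>.batch.azure.com",
+//     new DefaultAzureCredential()
+//   );
+//   const pools = await client.listPools();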
+ +import { TokenCredential } from "@azure/core-auth"; +import { Pipeline } from "@azure/core-rest-pipeline"; +import { + ApplicationListResult, + BatchApplication, + PoolListUsageMetricsResult, + BatchPoolCreateOptions, + BatchPoolListResult, + BatchPool, + AutoScaleRun, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchPoolReplaceOptions, + NodeRemoveOptions, + AccountListSupportedImagesResult, + PoolNodeCountsListResult, + BatchJob, + BatchJobUpdateOptions, + BatchJobDisableOptions, + BatchJobTerminateOptions, + BatchJobCreateOptions, + BatchJobListResult, + BatchJobListPreparationAndReleaseTaskStatusResult, + TaskCountsResult, + BatchCertificate, + CertificateListResult, + BatchJobSchedule, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + BatchJobScheduleListResult, + BatchTaskCreateOptions, + BatchTaskListResult, + BatchTask, + BatchTaskCollection, + TaskAddCollectionResult, + BatchTaskListSubtasksResult, + NodeFileListResult, + BatchNodeUserCreateOptions, + BatchNodeUserUpdateOptions, + BatchNode, + NodeRebootOptions, + NodeReimageOptions, + NodeDisableSchedulingOptions, + BatchNodeRemoteLoginSettingsResult, + UploadBatchServiceLogsOptions, + UploadBatchServiceLogsResult, + BatchNodeListResult, + NodeVMExtension, + NodeVMExtensionList, +} from "./models/models.js"; +import { + ListApplicationsOptions, + GetApplicationOptions, + ListPoolUsageMetricsOptions, + CreatePoolOptions, + ListPoolsOptions, + DeletePoolOptions, + PoolExistsOptions, + GetPoolOptions, + UpdatePoolOptions, + DisablePoolAutoScaleOptions, + EnablePoolAutoScaleOptions, + EvaluatePoolAutoScaleOptions, + ResizePoolOptions, + StopPoolResizeOptions, + ReplacePoolPropertiesOptions, + RemoveNodesOptions, + ListSupportedImagesOptions, + ListPoolNodeCountsOptions, + DeleteJobOptions, + GetJobOptions, + UpdateJobOptions, + ReplaceJobOptions, + DisableJobOptions, + EnableJobOptions, + TerminateJobOptions, + CreateJobOptions, + ListJobsOptions, + ListJobsFromScheduleOptions, + ListJobPreparationAndReleaseTaskStatusOptions, + GetJobTaskCountsOptions, + CreateCertificateOptions, + ListCertificatesOptions, + CancelCertificateDeletionOptions, + DeleteCertificateOptions, + GetCertificateOptions, + JobScheduleExistsOptions, + DeleteJobScheduleOptions, + GetJobScheduleOptions, + UpdateJobScheduleOptions, + ReplaceJobScheduleOptions, + DisableJobScheduleOptions, + EnableJobScheduleOptions, + TerminateJobScheduleOptions, + CreateJobScheduleOptions, + ListJobSchedulesOptions, + CreateTaskOptions, + ListTasksOptions, + CreateTaskCollectionOptions, + DeleteTaskOptions, + GetTaskOptions, + ReplaceTaskOptions, + ListSubTasksOptions, + TerminateTaskOptions, + ReactivateTaskOptions, + DeleteTaskFileOptions, + GetTaskFileOptions, + GetTaskFilePropertiesOptions, + ListTaskFilesOptions, + CreateNodeUserOptions, + DeleteNodeUserOptions, + ReplaceNodeUserOptions, + GetNodeOptions, + RebootNodeOptions, + ReimageNodeOptions, + DisableNodeSchedulingOptions, + EnableNodeSchedulingOptions, + GetNodeRemoteLoginSettingsOptions, + GetNodeRemoteDesktopFileOptions, + UploadNodeLogsOptions, + ListNodesOptions, + GetNodeExtensionOptions, + ListNodeExtensionsOptions, + DeleteNodeFileOptions, + GetNodeFileOptions, + GetNodeFilePropertiesOptions, + ListNodeFilesOptions, +} from "./models/options.js"; +import { + createBatch, + BatchClientOptions, + BatchContext, + listApplications, + getApplication, + listPoolUsageMetrics, + createPool, + listPools, + deletePool, + 
+  poolExists,
+  getPool,
+  updatePool,
+  disablePoolAutoScale,
+  enablePoolAutoScale,
+  evaluatePoolAutoScale,
+  resizePool,
+  stopPoolResize,
+  replacePoolProperties,
+  removeNodes,
+  listSupportedImages,
+  listPoolNodeCounts,
+  deleteJob,
+  getJob,
+  updateJob,
+  replaceJob,
+  disableJob,
+  enableJob,
+  terminateJob,
+  createJob,
+  listJobs,
+  listJobsFromSchedule,
+  listJobPreparationAndReleaseTaskStatus,
+  getJobTaskCounts,
+  createCertificate,
+  listCertificates,
+  cancelCertificateDeletion,
+  deleteCertificate,
+  getCertificate,
+  jobScheduleExists,
+  deleteJobSchedule,
+  getJobSchedule,
+  updateJobSchedule,
+  replaceJobSchedule,
+  disableJobSchedule,
+  enableJobSchedule,
+  terminateJobSchedule,
+  createJobSchedule,
+  listJobSchedules,
+  createTask,
+  listTasks,
+  createTaskCollection,
+  deleteTask,
+  getTask,
+  replaceTask,
+  listSubTasks,
+  terminateTask,
+  reactivateTask,
+  deleteTaskFile,
+  getTaskFile,
+  getTaskFileProperties,
+  listTaskFiles,
+  createNodeUser,
+  deleteNodeUser,
+  replaceNodeUser,
+  getNode,
+  rebootNode,
+  reimageNode,
+  disableNodeScheduling,
+  enableNodeScheduling,
+  getNodeRemoteLoginSettings,
+  getNodeRemoteDesktopFile,
+  uploadNodeLogs,
+  listNodes,
+  getNodeExtension,
+  listNodeExtensions,
+  deleteNodeFile,
+  getNodeFile,
+  getNodeFileProperties,
+  listNodeFiles,
+} from "./api/index.js";
+
+export { BatchClientOptions } from "./api/BatchContext.js";
+
+export class BatchClient {
+  private _client: BatchContext;
+  /** The pipeline used by this client to make requests */
+  public readonly pipeline: Pipeline;
+
+  /** Azure Batch provides Cloud-scale job scheduling and compute management. */
+  constructor(
+    endpoint: string,
+    credential: TokenCredential,
+    options: BatchClientOptions = {}
+  ) {
+    this._client = createBatch(endpoint, credential, options);
+    this.pipeline = this._client.pipeline;
+  }
+
+  /**
+   * This operation returns only Applications and versions that are available for
+   * use on Compute Nodes; that is, that can be used in a Package reference. For
+   * administrator information about applications and versions that are not yet
+   * available to Compute Nodes, use the Azure portal or the Azure Resource Manager
+   * API.
+   */
+  listApplications(
+    options: ListApplicationsOptions = { requestOptions: {} }
+  ): Promise<ApplicationListResult> {
+    return listApplications(this._client, options);
+  }
+
+  /**
+   * This operation returns only Applications and versions that are available for
+   * use on Compute Nodes; that is, that can be used in a Package reference. For
+   * administrator information about Applications and versions that are not yet
+   * available to Compute Nodes, use the Azure portal or the Azure Resource Manager
+   * API.
+   */
+  getApplication(
+    applicationId: string,
+    options: GetApplicationOptions = { requestOptions: {} }
+  ): Promise<BatchApplication> {
+    return getApplication(this._client, applicationId, options);
+  }
+
+  /**
+   * If you do not specify a $filter clause including a poolId, the response
+   * includes all Pools that existed in the Account in the time range of the
+   * returned aggregation intervals. If you do not specify a $filter clause
+   * including a startTime or endTime these filters default to the start and end
+   * times of the last aggregation interval currently available; that is, only the
+   * last aggregation interval is returned.
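+   *
+   * A hedged usage sketch (assumes an authenticated `client` instance; the
+   * dates are placeholders) that pins the aggregation window explicitly
+   * instead of relying on the default described above:
+   * ```ts
+   * const metrics = await client.listPoolUsageMetrics({
+   *   starttime: new Date("2023-01-01T00:00:00Z"),
+   *   endtime: new Date("2023-01-02T00:00:00Z"),
+   * });
+   * ```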
+   */
+  listPoolUsageMetrics(
+    options: ListPoolUsageMetricsOptions = { requestOptions: {} }
+  ): Promise<PoolListUsageMetricsResult> {
+    return listPoolUsageMetrics(this._client, options);
+  }
+
+  /**
+   * When naming Pools, avoid including sensitive information such as user names or
+   * secret project names. This information may appear in telemetry logs accessible
+   * to Microsoft Support engineers.
+   */
+  createPool(
+    body: BatchPoolCreateOptions,
+    options: CreatePoolOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return createPool(this._client, body, options);
+  }
+
+  /** Lists all of the Pools in the specified Account. */
+  listPools(
+    options: ListPoolsOptions = { requestOptions: {} }
+  ): Promise<BatchPoolListResult> {
+    return listPools(this._client, options);
+  }
+
+  /**
+   * When you request that a Pool be deleted, the following actions occur: the Pool
+   * state is set to deleting; any ongoing resize operation on the Pool is stopped;
+   * the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks
+   * running on existing Compute Nodes are terminated and requeued (as if a resize
+   * Pool operation had been requested with the default requeue option); finally,
+   * the Pool is removed from the system. Because running Tasks are requeued, the
+   * user can rerun these Tasks by updating their Job to target a different Pool.
+   * The Tasks can then run on the new Pool. If you want to override the requeue
+   * behavior, then you should call resize Pool explicitly to shrink the Pool to
+   * zero size before deleting the Pool. If you call an Update, Patch or Delete API
+   * on a Pool in the deleting state, it will fail with HTTP status code 409 with
+   * error code PoolBeingDeleted.
+   */
+  deletePool(
+    poolId: string,
+    options: DeletePoolOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return deletePool(this._client, poolId, options);
+  }
+
+  /** Gets basic properties of a Pool. */
+  poolExists(
+    poolId: string,
+    options: PoolExistsOptions = { requestOptions: {} }
+  ): Promise<boolean> {
+    return poolExists(this._client, poolId, options);
+  }
+
+  /** Gets information about the specified Pool. */
+  getPool(
+    poolId: string,
+    options: GetPoolOptions = { requestOptions: {} }
+  ): Promise<BatchPool> {
+    return getPool(this._client, poolId, options);
+  }
+
+  /**
+   * This only replaces the Pool properties specified in the request. For example,
+   * if the Pool has a StartTask associated with it, and a request does not specify
+   * a StartTask element, then the Pool keeps the existing StartTask.
+   */
+  updatePool(
+    poolId: string,
+    body: BatchPoolUpdateOptions,
+    options: UpdatePoolOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return updatePool(this._client, poolId, body, options);
+  }
+
+  /** Disables automatic scaling for a Pool. */
+  disablePoolAutoScale(
+    poolId: string,
+    options: DisablePoolAutoScaleOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return disablePoolAutoScale(this._client, poolId, options);
+  }
+
+  /**
+   * You cannot enable automatic scaling on a Pool if a resize operation is in
+   * progress on the Pool. If automatic scaling of the Pool is currently disabled,
+   * you must specify a valid autoscale formula as part of the request. If automatic
+   * scaling of the Pool is already enabled, you may specify a new autoscale formula
+   * and/or a new evaluation interval. You cannot call this API for the same Pool
+   * more than once every 30 seconds.
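+   *
+   * A hedged sketch (assumes the enable-options body exposes the
+   * `autoScaleFormula` and `autoScaleEvaluationInterval` fields implied by the
+   * text above; the formula and interval values are placeholders):
+   * ```ts
+   * await client.enablePoolAutoScale("myPool", {
+   *   autoScaleFormula: "$TargetDedicatedNodes = 2;",
+   *   autoScaleEvaluationInterval: "PT5M",
+   * });
+   * ```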
+ */ + enablePoolAutoScale( + poolId: string, + body: BatchPoolEnableAutoScaleOptions, + options: EnablePoolAutoScaleOptions = { requestOptions: {} } + ): Promise { + return enablePoolAutoScale(this._client, poolId, body, options); + } + + /** + * This API is primarily for validating an autoscale formula, as it simply returns + * the result without applying the formula to the Pool. The Pool must have auto + * scaling enabled in order to evaluate a formula. + */ + evaluatePoolAutoScale( + poolId: string, + body: BatchPoolEvaluateAutoScaleOptions, + options: EvaluatePoolAutoScaleOptions = { requestOptions: {} } + ): Promise { + return evaluatePoolAutoScale(this._client, poolId, body, options); + } + + /** + * You can only resize a Pool when its allocation state is steady. If the Pool is + * already resizing, the request fails with status code 409. When you resize a + * Pool, the Pool's allocation state changes from steady to resizing. You cannot + * resize Pools which are configured for automatic scaling. If you try to do this, + * the Batch service returns an error 409. If you resize a Pool downwards, the + * Batch service chooses which Compute Nodes to remove. To remove specific Compute + * Nodes, use the Pool remove Compute Nodes API instead. + */ + resizePool( + poolId: string, + body: BatchPoolResizeOptions, + options: ResizePoolOptions = { requestOptions: {} } + ): Promise { + return resizePool(this._client, poolId, body, options); + } + + /** + * This does not restore the Pool to its previous state before the resize + * operation: it only stops any further changes being made, and the Pool maintains + * its current state. After stopping, the Pool stabilizes at the number of Compute + * Nodes it was at when the stop operation was done. During the stop operation, + * the Pool allocation state changes first to stopping and then to steady. A + * resize operation need not be an explicit resize Pool request; this API can also + * be used to halt the initial sizing of the Pool when it is created. + */ + stopPoolResize( + poolId: string, + options: StopPoolResizeOptions = { requestOptions: {} } + ): Promise { + return stopPoolResize(this._client, poolId, options); + } + + /** + * This fully replaces all the updatable properties of the Pool. For example, if + * the Pool has a StartTask associated with it and if StartTask is not specified + * with this request, then the Batch service will remove the existing StartTask. + */ + replacePoolProperties( + poolId: string, + body: BatchPoolReplaceOptions, + options: ReplacePoolPropertiesOptions = { requestOptions: {} } + ): Promise { + return replacePoolProperties(this._client, poolId, body, options); + } + + /** + * This operation can only run when the allocation state of the Pool is steady. + * When this operation runs, the allocation state changes from steady to resizing. + * Each request may remove up to 100 nodes. + */ + removeNodes( + poolId: string, + body: NodeRemoveOptions, + options: RemoveNodesOptions = { requestOptions: {} } + ): Promise { + return removeNodes(this._client, poolId, body, options); + } + + /** Lists all Virtual Machine Images supported by the Azure Batch service. */ + listSupportedImages( + options: ListSupportedImagesOptions = { requestOptions: {} } + ): Promise { + return listSupportedImages(this._client, options); + } + + /** + * Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + * numbers returned may not always be up to date. If you need exact node counts, + * use a list query. 
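+   *
+   * A hedged sketch (assumes an authenticated `client`; the OData `$filter`
+   * value is illustrative and the pool id is a placeholder):
+   * ```ts
+   * const counts = await client.listPoolNodeCounts({
+   *   $filter: "poolId eq 'myPool'",
+   * });
+   * ```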
+   */
+  listPoolNodeCounts(
+    options: ListPoolNodeCountsOptions = { requestOptions: {} }
+  ): Promise<PoolNodeCountsListResult> {
+    return listPoolNodeCounts(this._client, options);
+  }
+
+  /**
+   * Deleting a Job also deletes all Tasks that are part of that Job, and all Job
+   * statistics. This also overrides the retention period for Task data; that is, if
+   * the Job contains Tasks which are still retained on Compute Nodes, the Batch
+   * service deletes those Tasks' working directories and all their contents. When
+   * a Delete Job request is received, the Batch service sets the Job to the
+   * deleting state. All update operations on a Job that is in deleting state will
+   * fail with status code 409 (Conflict), with additional information indicating
+   * that the Job is being deleted.
+   */
+  deleteJob(
+    jobId: string,
+    options: DeleteJobOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return deleteJob(this._client, jobId, options);
+  }
+
+  /** Gets information about the specified Job. */
+  getJob(
+    jobId: string,
+    options: GetJobOptions = { requestOptions: {} }
+  ): Promise<BatchJob> {
+    return getJob(this._client, jobId, options);
+  }
+
+  /**
+   * This replaces only the Job properties specified in the request. For example, if
+   * the Job has constraints, and a request does not specify the constraints
+   * element, then the Job keeps the existing constraints.
+   */
+  updateJob(
+    jobId: string,
+    body: BatchJobUpdateOptions,
+    options: UpdateJobOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return updateJob(this._client, jobId, body, options);
+  }
+
+  /**
+   * This fully replaces all the updatable properties of the Job. For example, if
+   * the Job has constraints associated with it and if constraints is not specified
+   * with this request, then the Batch service will remove the existing constraints.
+   */
+  replaceJob(
+    jobId: string,
+    body: BatchJob,
+    options: ReplaceJobOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return replaceJob(this._client, jobId, body, options);
+  }
+
+  /**
+   * The Batch Service immediately moves the Job to the disabling state. Batch then
+   * uses the disableTasks parameter to determine what to do with the currently
+   * running Tasks of the Job. The Job remains in the disabling state until the
+   * disable operation is completed and all Tasks have been dealt with according to
+   * the disableTasks option; the Job then moves to the disabled state. No new Tasks
+   * are started under the Job until it moves back to active state. If you try to
+   * disable a Job that is in any state other than active, disabling, or disabled,
+   * the request fails with status code 409.
+   */
+  disableJob(
+    jobId: string,
+    body: BatchJobDisableOptions,
+    options: DisableJobOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return disableJob(this._client, jobId, body, options);
+  }
+
+  /**
+   * When you call this API, the Batch service sets a disabled Job to the enabling
+   * state. After this operation is completed, the Job moves to the active
+   * state, and scheduling of new Tasks under the Job resumes. The Batch service
+   * does not allow a Task to remain in the active state for more than 180 days.
+   * Therefore, if you enable a Job containing active Tasks which were added more
+   * than 180 days ago, those Tasks will not run.
+   */
+  enableJob(
+    jobId: string,
+    options: EnableJobOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return enableJob(this._client, jobId, options);
+  }
+
+  /**
+   * When a Terminate Job request is received, the Batch service sets the Job to the
+   * terminating state.
The Batch service then terminates any running Tasks + * associated with the Job and runs any required Job release Tasks. Then the Job + * moves into the completed state. If there are any Tasks in the Job in the active + * state, they will remain in the active state. Once a Job is terminated, new + * Tasks cannot be added and any remaining active Tasks will not be scheduled. + */ + terminateJob( + jobId: string, + body: BatchJobTerminateOptions, + options: TerminateJobOptions = { requestOptions: {} } + ): Promise { + return terminateJob(this._client, jobId, body, options); + } + + /** + * The Batch service supports two ways to control the work done as part of a Job. + * In the first approach, the user specifies a Job Manager Task. The Batch service + * launches this Task when it is ready to start the Job. The Job Manager Task + * controls all other Tasks that run under this Job, by using the Task APIs. In + * the second approach, the user directly controls the execution of Tasks under an + * active Job, by using the Task APIs. Also note: when naming Jobs, avoid + * including sensitive information such as user names or secret project names. + * This information may appear in telemetry logs accessible to Microsoft Support + * engineers. + */ + createJob( + body: BatchJobCreateOptions, + options: CreateJobOptions = { requestOptions: {} } + ): Promise { + return createJob(this._client, body, options); + } + + /** Lists all of the Jobs in the specified Account. */ + listJobs( + options: ListJobsOptions = { requestOptions: {} } + ): Promise { + return listJobs(this._client, options); + } + + /** Lists the Jobs that have been created under the specified Job Schedule. */ + listJobsFromSchedule( + jobScheduleId: string, + options: ListJobsFromScheduleOptions = { requestOptions: {} } + ): Promise { + return listJobsFromSchedule(this._client, jobScheduleId, options); + } + + /** + * This API returns the Job Preparation and Job Release Task status on all Compute + * Nodes that have run the Job Preparation or Job Release Task. This includes + * Compute Nodes which have since been removed from the Pool. If this API is + * invoked on a Job which has no Job Preparation or Job Release Task, the Batch + * service returns HTTP status code 409 (Conflict) with an error code of + * JobPreparationTaskNotSpecified. + */ + listJobPreparationAndReleaseTaskStatus( + jobId: string, + options: ListJobPreparationAndReleaseTaskStatusOptions = { + requestOptions: {}, + } + ): Promise { + return listJobPreparationAndReleaseTaskStatus(this._client, jobId, options); + } + + /** + * Task counts provide a count of the Tasks by active, running or completed Task + * state, and a count of Tasks which succeeded or failed. Tasks in the preparing + * state are counted as running. Note that the numbers returned may not always be + * up to date. If you need exact task counts, use a list query. + */ + getJobTaskCounts( + jobId: string, + options: GetJobTaskCountsOptions = { requestOptions: {} } + ): Promise { + return getJobTaskCounts(this._client, jobId, options); + } + + /** Creates a Certificate to the specified Account. */ + createCertificate( + body: BatchCertificate, + options: CreateCertificateOptions = { requestOptions: {} } + ): Promise { + return createCertificate(this._client, body, options); + } + + /** Lists all of the Certificates that have been added to the specified Account. 
*/ + listCertificates( + options: ListCertificatesOptions = { requestOptions: {} } + ): Promise { + return listCertificates(this._client, options); + } + + /** + * If you try to delete a Certificate that is being used by a Pool or Compute + * Node, the status of the Certificate changes to deleteFailed. If you decide that + * you want to continue using the Certificate, you can use this operation to set + * the status of the Certificate back to active. If you intend to delete the + * Certificate, you do not need to run this operation after the deletion failed. + * You must make sure that the Certificate is not being used by any resources, and + * then you can try again to delete the Certificate. + */ + cancelCertificateDeletion( + thumbprintAlgorithm: string, + thumbprint: string, + options: CancelCertificateDeletionOptions = { requestOptions: {} } + ): Promise { + return cancelCertificateDeletion( + this._client, + thumbprintAlgorithm, + thumbprint, + options + ); + } + + /** + * You cannot delete a Certificate if a resource (Pool or Compute Node) is using + * it. Before you can delete a Certificate, you must therefore make sure that the + * Certificate is not associated with any existing Pools, the Certificate is not + * installed on any Nodes (even if you remove a Certificate from a Pool, it is not + * removed from existing Compute Nodes in that Pool until they restart), and no + * running Tasks depend on the Certificate. If you try to delete a Certificate + * that is in use, the deletion fails. The Certificate status changes to + * deleteFailed. You can use Cancel Delete Certificate to set the status back to + * active if you decide that you want to continue using the Certificate. + */ + deleteCertificate( + thumbprintAlgorithm: string, + thumbprint: string, + options: DeleteCertificateOptions = { requestOptions: {} } + ): Promise { + return deleteCertificate( + this._client, + thumbprintAlgorithm, + thumbprint, + options + ); + } + + /** Gets information about the specified Certificate. */ + getCertificate( + thumbprintAlgorithm: string, + thumbprint: string, + options: GetCertificateOptions = { requestOptions: {} } + ): Promise { + return getCertificate( + this._client, + thumbprintAlgorithm, + thumbprint, + options + ); + } + + /** Checks the specified Job Schedule exists. */ + jobScheduleExists( + jobScheduleId: string, + options: JobScheduleExistsOptions = { requestOptions: {} } + ): Promise { + return jobScheduleExists(this._client, jobScheduleId, options); + } + + /** + * When you delete a Job Schedule, this also deletes all Jobs and Tasks under that + * schedule. When Tasks are deleted, all the files in their working directories on + * the Compute Nodes are also deleted (the retention period is ignored). The Job + * Schedule statistics are no longer accessible once the Job Schedule is deleted, + * though they are still counted towards Account lifetime statistics. + */ + deleteJobSchedule( + jobScheduleId: string, + options: DeleteJobScheduleOptions = { requestOptions: {} } + ): Promise { + return deleteJobSchedule(this._client, jobScheduleId, options); + } + + /** Gets information about the specified Job Schedule. */ + getJobSchedule( + jobScheduleId: string, + options: GetJobScheduleOptions = { requestOptions: {} } + ): Promise { + return getJobSchedule(this._client, jobScheduleId, options); + } + + /** + * This replaces only the Job Schedule properties specified in the request. 
For + * example, if the schedule property is not specified with this request, then the + * Batch service will keep the existing schedule. Changes to a Job Schedule only + * impact Jobs created by the schedule after the update has taken place; currently + * running Jobs are unaffected. + */ + updateJobSchedule( + jobScheduleId: string, + body: BatchJobScheduleUpdateOptions, + options: UpdateJobScheduleOptions = { requestOptions: {} } + ): Promise { + return updateJobSchedule(this._client, jobScheduleId, body, options); + } + + /** + * This fully replaces all the updatable properties of the Job Schedule. For + * example, if the schedule property is not specified with this request, then the + * Batch service will remove the existing schedule. Changes to a Job Schedule only + * impact Jobs created by the schedule after the update has taken place; currently + * running Jobs are unaffected. + */ + replaceJobSchedule( + jobScheduleId: string, + body: BatchJobSchedule, + options: ReplaceJobScheduleOptions = { requestOptions: {} } + ): Promise { + return replaceJobSchedule(this._client, jobScheduleId, body, options); + } + + /** No new Jobs will be created until the Job Schedule is enabled again. */ + disableJobSchedule( + jobScheduleId: string, + options: DisableJobScheduleOptions = { requestOptions: {} } + ): Promise { + return disableJobSchedule(this._client, jobScheduleId, options); + } + + /** Enables a Job Schedule. */ + enableJobSchedule( + jobScheduleId: string, + options: EnableJobScheduleOptions = { requestOptions: {} } + ): Promise { + return enableJobSchedule(this._client, jobScheduleId, options); + } + + /** Terminates a Job Schedule. */ + terminateJobSchedule( + jobScheduleId: string, + options: TerminateJobScheduleOptions = { requestOptions: {} } + ): Promise { + return terminateJobSchedule(this._client, jobScheduleId, options); + } + + /** Creates a Job Schedule to the specified Account. */ + createJobSchedule( + body: BatchJobScheduleCreateOptions, + options: CreateJobScheduleOptions = { requestOptions: {} } + ): Promise { + return createJobSchedule(this._client, body, options); + } + + /** Lists all of the Job Schedules in the specified Account. */ + listJobSchedules( + options: ListJobSchedulesOptions = { requestOptions: {} } + ): Promise { + return listJobSchedules(this._client, options); + } + + /** + * The maximum lifetime of a Task from addition to completion is 180 days. If a + * Task has not completed within 180 days of being added it will be terminated by + * the Batch service and left in whatever state it was in at that time. + */ + createTask( + jobId: string, + body: BatchTaskCreateOptions, + options: CreateTaskOptions = { requestOptions: {} } + ): Promise { + return createTask(this._client, jobId, body, options); + } + + /** + * For multi-instance Tasks, information such as affinityId, executionInfo and + * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + * information about subtasks. + */ + listTasks( + jobId: string, + options: ListTasksOptions = { requestOptions: {} } + ): Promise { + return listTasks(this._client, jobId, options); + } + + /** + * Note that each Task must have a unique ID. The Batch service may not return the + * results for each Task in the same order the Tasks were submitted in this + * request. If the server times out or the connection is closed during the + * request, the request may have been partially or fully processed, or not at all. + * In such cases, the user should re-issue the request. 
Note that it is up to the + * user to correctly handle failures when re-issuing a request. For example, you + * should use the same Task IDs during a retry so that if the prior operation + * succeeded, the retry will not create extra Tasks unexpectedly. If the response + * contains any Tasks which failed to add, a client can retry the request. In a + * retry, it is most efficient to resubmit only Tasks that failed to add, and to + * omit Tasks that were successfully added on the first attempt. The maximum + * lifetime of a Task from addition to completion is 180 days. If a Task has not + * completed within 180 days of being added it will be terminated by the Batch + * service and left in whatever state it was in at that time. + */ + createTaskCollection( + jobId: string, + collection: BatchTaskCollection, + options: CreateTaskCollectionOptions = { requestOptions: {} } + ): Promise { + return createTaskCollection(this._client, jobId, collection, options); + } + + /** + * When a Task is deleted, all of the files in its directory on the Compute Node + * where it ran are also deleted (regardless of the retention time). For + * multi-instance Tasks, the delete Task operation applies synchronously to the + * primary task; subtasks and their files are then deleted asynchronously in the + * background. + */ + deleteTask( + jobId: string, + taskId: string, + options: DeleteTaskOptions = { requestOptions: {} } + ): Promise { + return deleteTask(this._client, jobId, taskId, options); + } + + /** + * For multi-instance Tasks, information such as affinityId, executionInfo and + * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + * information about subtasks. + */ + getTask( + jobId: string, + taskId: string, + options: GetTaskOptions = { requestOptions: {} } + ): Promise { + return getTask(this._client, jobId, taskId, options); + } + + /** Updates the properties of the specified Task. */ + replaceTask( + jobId: string, + taskId: string, + body: BatchTask, + options: ReplaceTaskOptions = { requestOptions: {} } + ): Promise { + return replaceTask(this._client, jobId, taskId, body, options); + } + + /** If the Task is not a multi-instance Task then this returns an empty collection. */ + listSubTasks( + jobId: string, + taskId: string, + options: ListSubTasksOptions = { requestOptions: {} } + ): Promise { + return listSubTasks(this._client, jobId, taskId, options); + } + + /** + * When the Task has been terminated, it moves to the completed state. For + * multi-instance Tasks, the terminate Task operation applies synchronously to the + * primary task; subtasks are then terminated asynchronously in the background. + */ + terminateTask( + jobId: string, + taskId: string, + options: TerminateTaskOptions = { requestOptions: {} } + ): Promise { + return terminateTask(this._client, jobId, taskId, options); + } + + /** + * Reactivation makes a Task eligible to be retried again up to its maximum retry + * count. The Task's state is changed to active. As the Task is no longer in the + * completed state, any previous exit code or failure information is no longer + * available after reactivation. Each time a Task is reactivated, its retry count + * is reset to 0. Reactivation will fail for Tasks that are not completed or that + * previously completed successfully (with an exit code of 0). Additionally, it + * will fail if the Job has completed (or is terminating or deleting). 
+   */
+  reactivateTask(
+    jobId: string,
+    taskId: string,
+    options: ReactivateTaskOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return reactivateTask(this._client, jobId, taskId, options);
+  }
+
+  /** Deletes the specified Task file from the Compute Node where the Task ran. */
+  deleteTaskFile(
+    jobId: string,
+    taskId: string,
+    filePath: string,
+    options: DeleteTaskFileOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return deleteTaskFile(this._client, jobId, taskId, filePath, options);
+  }
+
+  /** Returns the content of the specified Task file. */
+  getTaskFile(
+    jobId: string,
+    taskId: string,
+    filePath: string,
+    options: GetTaskFileOptions = { requestOptions: {} }
+  ): Promise<Uint8Array> {
+    return getTaskFile(this._client, jobId, taskId, filePath, options);
+  }
+
+  /** Gets the properties of the specified Task file. */
+  getTaskFileProperties(
+    jobId: string,
+    taskId: string,
+    filePath: string,
+    options: GetTaskFilePropertiesOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return getTaskFileProperties(
+      this._client,
+      jobId,
+      taskId,
+      filePath,
+      options
+    );
+  }
+
+  /** Lists the files in a Task's directory on its Compute Node. */
+  listTaskFiles(
+    jobId: string,
+    taskId: string,
+    options: ListTaskFilesOptions = { requestOptions: {} }
+  ): Promise<NodeFileListResult> {
+    return listTaskFiles(this._client, jobId, taskId, options);
+  }
+
+  /**
+   * You can add a user Account to a Compute Node only when it is in the idle or
+   * running state.
+   */
+  createNodeUser(
+    poolId: string,
+    nodeId: string,
+    body: BatchNodeUserCreateOptions,
+    options: CreateNodeUserOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return createNodeUser(this._client, poolId, nodeId, body, options);
+  }
+
+  /**
+   * You can delete a user Account from a Compute Node only when it is in the idle
+   * or running state.
+   */
+  deleteNodeUser(
+    poolId: string,
+    nodeId: string,
+    userName: string,
+    options: DeleteNodeUserOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return deleteNodeUser(this._client, poolId, nodeId, userName, options);
+  }
+
+  /**
+   * This operation replaces all of the updatable properties of the Account. For
+   * example, if the expiryTime element is not specified, the current value is
+   * replaced with the default value, not left unmodified. You can update a user
+   * Account on a Compute Node only when it is in the idle or running state.
+   */
+  replaceNodeUser(
+    poolId: string,
+    nodeId: string,
+    userName: string,
+    body: BatchNodeUserUpdateOptions,
+    options: ReplaceNodeUserOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return replaceNodeUser(
+      this._client,
+      poolId,
+      nodeId,
+      userName,
+      body,
+      options
+    );
+  }
+
+  /** Gets information about the specified Compute Node. */
+  getNode(
+    poolId: string,
+    nodeId: string,
+    options: GetNodeOptions = { requestOptions: {} }
+  ): Promise<BatchNode> {
+    return getNode(this._client, poolId, nodeId, options);
+  }
+
+  /** You can restart a Compute Node only if it is in an idle or running state. */
+  rebootNode(
+    poolId: string,
+    nodeId: string,
+    body: NodeRebootOptions,
+    options: RebootNodeOptions = { requestOptions: {} }
+  ): Promise<void> {
+    return rebootNode(this._client, poolId, nodeId, body, options);
+  }
+
+  /**
+   * You can reinstall the operating system on a Compute Node only if it is in an
+   * idle or running state. This API can be invoked only on Pools created with the
+   * cloud service configuration property.
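+   *
+   * A hedged sketch ("requeue" is one of the service's documented reimage
+   * options; ids are placeholders):
+   * ```ts
+   * await client.reimageNode("myPool", "node-1", {
+   *   nodeReimageOption: "requeue",
+   * });
+   * ```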
+ */ + reimageNode( + poolId: string, + nodeId: string, + body: NodeReimageOptions, + options: ReimageNodeOptions = { requestOptions: {} } + ): Promise { + return reimageNode(this._client, poolId, nodeId, body, options); + } + + /** + * You can disable Task scheduling on a Compute Node only if its current + * scheduling state is enabled. + */ + disableNodeScheduling( + poolId: string, + nodeId: string, + body: NodeDisableSchedulingOptions, + options: DisableNodeSchedulingOptions = { requestOptions: {} } + ): Promise { + return disableNodeScheduling(this._client, poolId, nodeId, body, options); + } + + /** + * You can enable Task scheduling on a Compute Node only if its current scheduling + * state is disabled + */ + enableNodeScheduling( + poolId: string, + nodeId: string, + options: EnableNodeSchedulingOptions = { requestOptions: {} } + ): Promise { + return enableNodeScheduling(this._client, poolId, nodeId, options); + } + + /** + * Before you can remotely login to a Compute Node using the remote login + * settings, you must create a user Account on the Compute Node. This API can be + * invoked only on Pools created with the virtual machine configuration property. + * For Pools created with a cloud service configuration, see the GetRemoteDesktop + * API. + */ + getNodeRemoteLoginSettings( + poolId: string, + nodeId: string, + options: GetNodeRemoteLoginSettingsOptions = { requestOptions: {} } + ): Promise { + return getNodeRemoteLoginSettings(this._client, poolId, nodeId, options); + } + + /** + * Before you can access a Compute Node by using the RDP file, you must create a + * user Account on the Compute Node. This API can only be invoked on Pools created + * with a cloud service configuration. For Pools created with a virtual machine + * configuration, see the GetRemoteLoginSettings API. + */ + getNodeRemoteDesktopFile( + poolId: string, + nodeId: string, + options: GetNodeRemoteDesktopFileOptions = { requestOptions: {} } + ): Promise { + return getNodeRemoteDesktopFile(this._client, poolId, nodeId, options); + } + + /** + * This is for gathering Azure Batch service log files in an automated fashion + * from Compute Nodes if you are experiencing an error and wish to escalate to + * Azure support. The Azure Batch service log files should be shared with Azure + * support to aid in debugging issues with the Batch service. + */ + uploadNodeLogs( + poolId: string, + nodeId: string, + body: UploadBatchServiceLogsOptions, + options: UploadNodeLogsOptions = { requestOptions: {} } + ): Promise { + return uploadNodeLogs(this._client, poolId, nodeId, body, options); + } + + /** Lists the Compute Nodes in the specified Pool. */ + listNodes( + poolId: string, + options: ListNodesOptions = { requestOptions: {} } + ): Promise { + return listNodes(this._client, poolId, options); + } + + /** Gets information about the specified Compute Node Extension. */ + getNodeExtension( + poolId: string, + nodeId: string, + extensionName: string, + options: GetNodeExtensionOptions = { requestOptions: {} } + ): Promise { + return getNodeExtension( + this._client, + poolId, + nodeId, + extensionName, + options + ); + } + + /** Lists the Compute Nodes Extensions in the specified Pool. */ + listNodeExtensions( + poolId: string, + nodeId: string, + options: ListNodeExtensionsOptions = { requestOptions: {} } + ): Promise { + return listNodeExtensions(this._client, poolId, nodeId, options); + } + + /** Deletes the specified file from the Compute Node. 
*/ + deleteNodeFile( + poolId: string, + nodeId: string, + filePath: string, + options: DeleteNodeFileOptions = { requestOptions: {} } + ): Promise { + return deleteNodeFile(this._client, poolId, nodeId, filePath, options); + } + + /** Returns the content of the specified Compute Node file. */ + getNodeFile( + poolId: string, + nodeId: string, + filePath: string, + options: GetNodeFileOptions = { requestOptions: {} } + ): Promise { + return getNodeFile(this._client, poolId, nodeId, filePath, options); + } + + /** Gets the properties of the specified Compute Node file. */ + getNodeFileProperties( + poolId: string, + nodeId: string, + filePath: string, + options: GetNodeFilePropertiesOptions = { requestOptions: {} } + ): Promise { + return getNodeFileProperties( + this._client, + poolId, + nodeId, + filePath, + options + ); + } + + /** Lists all of the files in Task directories on the specified Compute Node. */ + listNodeFiles( + poolId: string, + nodeId: string, + options: ListNodeFilesOptions = { requestOptions: {} } + ): Promise { + return listNodeFiles(this._client, poolId, nodeId, options); + } +} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/BatchContext.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/BatchContext.ts new file mode 100644 index 0000000000..6df3f86648 --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/BatchContext.ts @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { TokenCredential } from "@azure/core-auth"; +import { ClientOptions } from "@azure-rest/core-client"; +import { BatchContext } from "../rest/index.js"; +import getClient from "../rest/index.js"; + +export interface BatchClientOptions extends ClientOptions {} + +export { BatchContext } from "../rest/index.js"; + +/** Azure Batch provides Cloud-scale job scheduling and compute management. */ +export function createBatch( + endpoint: string, + credential: TokenCredential, + options: BatchClientOptions = {} +): BatchContext { + const clientContext = getClient(endpoint, credential, options); + return clientContext; +} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/index.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/index.ts new file mode 100644 index 0000000000..cfc770d76f --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/index.ts @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +export { + createBatch, + BatchClientOptions, + BatchContext, +} from "./BatchContext.js"; +export { + listApplications, + getApplication, + listPoolUsageMetrics, + createPool, + listPools, + deletePool, + poolExists, + getPool, + updatePool, + disablePoolAutoScale, + enablePoolAutoScale, + evaluatePoolAutoScale, + resizePool, + stopPoolResize, + replacePoolProperties, + removeNodes, + listSupportedImages, + listPoolNodeCounts, + deleteJob, + getJob, + updateJob, + replaceJob, + disableJob, + enableJob, + terminateJob, + createJob, + listJobs, + listJobsFromSchedule, + listJobPreparationAndReleaseTaskStatus, + getJobTaskCounts, + createCertificate, + listCertificates, + cancelCertificateDeletion, + deleteCertificate, + getCertificate, + jobScheduleExists, + deleteJobSchedule, + getJobSchedule, + updateJobSchedule, + replaceJobSchedule, + disableJobSchedule, + enableJobSchedule, + terminateJobSchedule, + createJobSchedule, + listJobSchedules, + createTask, + listTasks, + createTaskCollection, + deleteTask, + getTask, + replaceTask, + listSubTasks, + terminateTask, + reactivateTask, + deleteTaskFile, + getTaskFile, + getTaskFileProperties, + listTaskFiles, + createNodeUser, + deleteNodeUser, + replaceNodeUser, + getNode, + rebootNode, + reimageNode, + disableNodeScheduling, + enableNodeScheduling, + getNodeRemoteLoginSettings, + getNodeRemoteDesktopFile, + uploadNodeLogs, + listNodes, + getNodeExtension, + listNodeExtensions, + deleteNodeFile, + getNodeFile, + getNodeFileProperties, + listNodeFiles, +} from "./operations.js"; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/operations.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/operations.ts new file mode 100644 index 0000000000..13f022ec10 --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/operations.ts @@ -0,0 +1,17366 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +import { + ApplicationListResult, + BatchApplication, + PoolListUsageMetricsResult, + BatchPoolCreateOptions, + BatchPoolListResult, + BatchPool, + AutoScaleRun, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchPoolReplaceOptions, + NodeRemoveOptions, + AccountListSupportedImagesResult, + PoolNodeCountsListResult, + BatchJob, + BatchJobUpdateOptions, + BatchJobDisableOptions, + BatchJobTerminateOptions, + BatchJobCreateOptions, + BatchJobListResult, + BatchJobListPreparationAndReleaseTaskStatusResult, + TaskCountsResult, + BatchCertificate, + CertificateListResult, + BatchJobSchedule, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + BatchJobScheduleListResult, + BatchTaskCreateOptions, + BatchTaskListResult, + BatchTask, + BatchTaskCollection, + TaskAddCollectionResult, + BatchTaskListSubtasksResult, + NodeFileListResult, + BatchNodeUserCreateOptions, + BatchNodeUserUpdateOptions, + BatchNode, + NodeRebootOptions, + NodeReimageOptions, + NodeDisableSchedulingOptions, + BatchNodeRemoteLoginSettingsResult, + UploadBatchServiceLogsOptions, + UploadBatchServiceLogsResult, + BatchNodeListResult, + NodeVMExtension, + NodeVMExtensionList, +} from "../models/models.js"; +import { + isUnexpected, + BatchContext as Client, + CancelCertificateDeletion204Response, + CancelCertificateDeletionDefaultResponse, + CreateCertificate201Response, + CreateCertificateDefaultResponse, + CreateJob201Response, + CreateJobDefaultResponse, + CreateJobSchedule201Response, + CreateJobScheduleDefaultResponse, + CreateNodeUser201Response, + CreateNodeUserDefaultResponse, + CreatePool201Response, + CreatePoolDefaultResponse, + CreateTask201Response, + CreateTaskCollection200Response, + CreateTaskCollectionDefaultResponse, + CreateTaskDefaultResponse, + DeleteCertificate202Response, + DeleteCertificateDefaultResponse, + DeleteJob202Response, + DeleteJobDefaultResponse, + DeleteJobSchedule202Response, + DeleteJobScheduleDefaultResponse, + DeleteNodeFile200Response, + DeleteNodeFileDefaultResponse, + DeleteNodeUser200Response, + DeleteNodeUserDefaultResponse, + DeletePool202Response, + DeletePoolDefaultResponse, + DeleteTask200Response, + DeleteTaskDefaultResponse, + DeleteTaskFile200Response, + DeleteTaskFileDefaultResponse, + DisableJob202Response, + DisableJobDefaultResponse, + DisableJobSchedule204Response, + DisableJobScheduleDefaultResponse, + DisableNodeScheduling200Response, + DisableNodeSchedulingDefaultResponse, + DisablePoolAutoScale200Response, + DisablePoolAutoScaleDefaultResponse, + EnableJob202Response, + EnableJobDefaultResponse, + EnableJobSchedule204Response, + EnableJobScheduleDefaultResponse, + EnableNodeScheduling200Response, + EnableNodeSchedulingDefaultResponse, + EnablePoolAutoScale200Response, + EnablePoolAutoScaleDefaultResponse, + EvaluatePoolAutoScale200Response, + EvaluatePoolAutoScaleDefaultResponse, + GetApplication200Response, + GetApplicationDefaultResponse, + GetCertificate200Response, + GetCertificateDefaultResponse, + GetJob200Response, + GetJobDefaultResponse, + GetJobSchedule200Response, + GetJobScheduleDefaultResponse, + GetJobTaskCounts200Response, + GetJobTaskCountsDefaultResponse, + GetNode200Response, + GetNodeDefaultResponse, + GetNodeExtension200Response, + GetNodeExtensionDefaultResponse, + GetNodeFile200Response, + GetNodeFileDefaultResponse, + GetNodeFileProperties200Response, + GetNodeFilePropertiesDefaultResponse, + GetNodeRemoteDesktopFile200Response, + 
GetNodeRemoteDesktopFileDefaultResponse, + GetNodeRemoteLoginSettings200Response, + GetNodeRemoteLoginSettingsDefaultResponse, + GetPool200Response, + GetPoolDefaultResponse, + GetTask200Response, + GetTaskDefaultResponse, + GetTaskFile200Response, + GetTaskFileDefaultResponse, + GetTaskFileProperties200Response, + GetTaskFilePropertiesDefaultResponse, + JobScheduleExists200Response, + JobScheduleExists404Response, + JobScheduleExistsDefaultResponse, + ListApplications200Response, + ListApplicationsDefaultResponse, + ListCertificates200Response, + ListCertificatesDefaultResponse, + ListJobPreparationAndReleaseTaskStatus200Response, + ListJobPreparationAndReleaseTaskStatusDefaultResponse, + ListJobs200Response, + ListJobSchedules200Response, + ListJobSchedulesDefaultResponse, + ListJobsDefaultResponse, + ListJobsFromSchedule200Response, + ListJobsFromScheduleDefaultResponse, + ListNodeExtensions200Response, + ListNodeExtensionsDefaultResponse, + ListNodeFiles200Response, + ListNodeFilesDefaultResponse, + ListNodes200Response, + ListNodesDefaultResponse, + ListPoolNodeCounts200Response, + ListPoolNodeCountsDefaultResponse, + ListPools200Response, + ListPoolsDefaultResponse, + ListPoolUsageMetrics200Response, + ListPoolUsageMetricsDefaultResponse, + ListSubTasks200Response, + ListSubTasksDefaultResponse, + ListSupportedImages200Response, + ListSupportedImagesDefaultResponse, + ListTaskFiles200Response, + ListTaskFilesDefaultResponse, + ListTasks200Response, + ListTasksDefaultResponse, + PoolExists200Response, + PoolExists404Response, + PoolExistsDefaultResponse, + ReactivateTask204Response, + ReactivateTaskDefaultResponse, + RebootNode202Response, + RebootNodeDefaultResponse, + ReimageNode202Response, + ReimageNodeDefaultResponse, + RemoveNodes202Response, + RemoveNodesDefaultResponse, + ReplaceJob200Response, + ReplaceJobDefaultResponse, + ReplaceJobSchedule200Response, + ReplaceJobScheduleDefaultResponse, + ReplaceNodeUser200Response, + ReplaceNodeUserDefaultResponse, + ReplacePoolProperties204Response, + ReplacePoolPropertiesDefaultResponse, + ReplaceTask200Response, + ReplaceTaskDefaultResponse, + ResizePool202Response, + ResizePoolDefaultResponse, + StopPoolResize202Response, + StopPoolResizeDefaultResponse, + TerminateJob202Response, + TerminateJobDefaultResponse, + TerminateJobSchedule202Response, + TerminateJobScheduleDefaultResponse, + TerminateTask204Response, + TerminateTaskDefaultResponse, + UpdateJob200Response, + UpdateJobDefaultResponse, + UpdateJobSchedule200Response, + UpdateJobScheduleDefaultResponse, + UpdatePool200Response, + UpdatePoolDefaultResponse, + UploadNodeLogs200Response, + UploadNodeLogsDefaultResponse, +} from "../rest/index.js"; +import { + StreamableMethod, + operationOptionsToRequestParameters, +} from "@azure-rest/core-client"; +import { uint8ArrayToString, stringToUint8Array } from "@azure/core-util"; +import { + ListApplicationsOptions, + GetApplicationOptions, + ListPoolUsageMetricsOptions, + CreatePoolOptions, + ListPoolsOptions, + DeletePoolOptions, + PoolExistsOptions, + GetPoolOptions, + UpdatePoolOptions, + DisablePoolAutoScaleOptions, + EnablePoolAutoScaleOptions, + EvaluatePoolAutoScaleOptions, + ResizePoolOptions, + StopPoolResizeOptions, + ReplacePoolPropertiesOptions, + RemoveNodesOptions, + ListSupportedImagesOptions, + ListPoolNodeCountsOptions, + DeleteJobOptions, + GetJobOptions, + UpdateJobOptions, + ReplaceJobOptions, + DisableJobOptions, + EnableJobOptions, + TerminateJobOptions, + CreateJobOptions, + ListJobsOptions, + 
ListJobsFromScheduleOptions, + ListJobPreparationAndReleaseTaskStatusOptions, + GetJobTaskCountsOptions, + CreateCertificateOptions, + ListCertificatesOptions, + CancelCertificateDeletionOptions, + DeleteCertificateOptions, + GetCertificateOptions, + JobScheduleExistsOptions, + DeleteJobScheduleOptions, + GetJobScheduleOptions, + UpdateJobScheduleOptions, + ReplaceJobScheduleOptions, + DisableJobScheduleOptions, + EnableJobScheduleOptions, + TerminateJobScheduleOptions, + CreateJobScheduleOptions, + ListJobSchedulesOptions, + CreateTaskOptions, + ListTasksOptions, + CreateTaskCollectionOptions, + DeleteTaskOptions, + GetTaskOptions, + ReplaceTaskOptions, + ListSubTasksOptions, + TerminateTaskOptions, + ReactivateTaskOptions, + DeleteTaskFileOptions, + GetTaskFileOptions, + GetTaskFilePropertiesOptions, + ListTaskFilesOptions, + CreateNodeUserOptions, + DeleteNodeUserOptions, + ReplaceNodeUserOptions, + GetNodeOptions, + RebootNodeOptions, + ReimageNodeOptions, + DisableNodeSchedulingOptions, + EnableNodeSchedulingOptions, + GetNodeRemoteLoginSettingsOptions, + GetNodeRemoteDesktopFileOptions, + UploadNodeLogsOptions, + ListNodesOptions, + GetNodeExtensionOptions, + ListNodeExtensionsOptions, + DeleteNodeFileOptions, + GetNodeFileOptions, + GetNodeFilePropertiesOptions, + ListNodeFilesOptions, +} from "../models/options.js"; + +export function _listApplicationsSend( + context: Client, + options: ListApplicationsOptions = { requestOptions: {} } +): StreamableMethod< + ListApplications200Response | ListApplicationsDefaultResponse +> { + return context + .path("/applications") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + }, + }); +} + +export async function _listApplicationsDeserialize( + result: ListApplications200Response | ListApplicationsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + displayName: p["displayName"], + versions: p["versions"], + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** + * This operation returns only Applications and versions that are available for + * use on Compute Nodes; that is, that can be used in an Package reference. For + * administrator information about applications and versions that are not yet + * available to Compute Nodes, use the Azure portal or the Azure Resource Manager + * API. 
+ */ +export async function listApplications( + context: Client, + options: ListApplicationsOptions = { requestOptions: {} } +): Promise { + const result = await _listApplicationsSend(context, options); + return _listApplicationsDeserialize(result); +} + +export function _getApplicationSend( + context: Client, + applicationId: string, + options: GetApplicationOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/applications/{applicationId}", applicationId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getApplicationDeserialize( + result: GetApplication200Response | GetApplicationDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + displayName: result.body["displayName"], + versions: result.body["versions"], + }; +} + +/** + * This operation returns only Applications and versions that are available for + * use on Compute Nodes; that is, that can be used in an Package reference. For + * administrator information about Applications and versions that are not yet + * available to Compute Nodes, use the Azure portal or the Azure Resource Manager + * API. + */ +export async function getApplication( + context: Client, + applicationId: string, + options: GetApplicationOptions = { requestOptions: {} } +): Promise { + const result = await _getApplicationSend(context, applicationId, options); + return _getApplicationDeserialize(result); +} + +export function _listPoolUsageMetricsSend( + context: Client, + options: ListPoolUsageMetricsOptions = { requestOptions: {} } +): StreamableMethod< + ListPoolUsageMetrics200Response | ListPoolUsageMetricsDefaultResponse +> { + return context + .path("/poolusagemetrics") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + starttime: options?.starttime?.toISOString(), + endtime: options?.endtime?.toISOString(), + $filter: options?.$filter, + }, + }); +} + +export async function _listPoolUsageMetricsDeserialize( + result: ListPoolUsageMetrics200Response | ListPoolUsageMetricsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + poolId: p["poolId"], + startTime: new Date(p["startTime"]), + endTime: new Date(p["endTime"]), + vmSize: p["vmSize"], + totalCoreHours: p["totalCoreHours"], + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** + * If you do not specify a $filter clause including a poolId, the response + * includes all Pools that existed in the Account in the time range of the + * returned aggregation intervals. If you do not specify a $filter clause + * including a startTime or endTime these filters default to the start and end + * times of the last aggregation interval currently available; that is, only the + * last aggregation interval is returned. 
+ */ +export async function listPoolUsageMetrics( + context: Client, + options: ListPoolUsageMetricsOptions = { requestOptions: {} } +): Promise { + const result = await _listPoolUsageMetricsSend(context, options); + return _listPoolUsageMetricsDeserialize(result); +} + +export function _createPoolSend( + context: Client, + body: BatchPoolCreateOptions, + options: CreatePoolOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + id: body["id"], + displayName: body["displayName"], + vmSize: body["vmSize"], + cloudServiceConfiguration: !body.cloudServiceConfiguration + ? undefined + : { + osFamily: body.cloudServiceConfiguration?.["osFamily"], + osVersion: body.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !body.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + body.virtualMachineConfiguration?.imageReference["publisher"], + offer: + body.virtualMachineConfiguration?.imageReference["offer"], + sku: body.virtualMachineConfiguration?.imageReference["sku"], + version: + body.virtualMachineConfiguration?.imageReference["version"], + virtualMachineImageId: + body.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + }, + nodeAgentSKUId: + body.virtualMachineConfiguration?.["nodeAgentSKUId"], + windowsConfiguration: !body.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + body.virtualMachineConfiguration?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + body.virtualMachineConfiguration?.["dataDisks"] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: body.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !body.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: body.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + body.virtualMachineConfiguration + ?.containerConfiguration?.["containerImageNames"], + containerRegistries: ( + body.virtualMachineConfiguration + ?.containerConfiguration?.["containerRegistries"] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + }, + diskEncryptionConfiguration: !body.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + body.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.["targets"], + }, + nodePlacementConfiguration: !body.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + body.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + body.virtualMachineConfiguration?.["extensions"] ?? 
[] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: p["provisionAfterExtensions"], + })), + osDisk: !body.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !body.virtualMachineConfiguration + ?.osDisk?.ephemeralOSDiskSettings + ? undefined + : { + placement: + body.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings?.["placement"], + }, + }, + }, + resizeTimeout: body["resizeTimeout"], + targetDedicatedNodes: body["targetDedicatedNodes"], + targetLowPriorityNodes: body["targetLowPriorityNodes"], + enableAutoScale: body["enableAutoScale"], + autoScaleFormula: body["autoScaleFormula"], + autoScaleEvaluationInterval: body["autoScaleEvaluationInterval"], + enableInterNodeCommunication: body["enableInterNodeCommunication"], + networkConfiguration: !body.networkConfiguration + ? undefined + : { + subnetId: body.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + body.networkConfiguration?.["dynamicVNetAssignmentScope"], + endpointConfiguration: !body.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + body.networkConfiguration?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: p["frontendPortRangeStart"], + frontendPortRangeEnd: p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !body.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + body.networkConfiguration?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + body.networkConfiguration?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + body.networkConfiguration?.["enableAcceleratedNetworking"], + }, + startTask: !body.startTask + ? undefined + : { + commandLine: body.startTask?.["commandLine"], + containerSettings: !body.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: body.startTask?.containerSettings?.["imageName"], + registry: !body.startTask?.containerSettings?.registry + ? undefined + : { + username: + body.startTask?.containerSettings?.registry?.[ + "username" + ], + password: + body.startTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + body.startTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.startTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.startTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (body.startTask?.["resourceFiles"] ?? 
[]).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + body.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !body.startTask?.userIdentity + ? undefined + : { + username: body.startTask?.userIdentity?.["username"], + autoUser: !body.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: + body.startTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: body.startTask?.["maxTaskRetryCount"], + waitForSuccess: body.startTask?.["waitForSuccess"], + }, + certificateReferences: (body["certificateReferences"] ?? []).map( + (p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + }) + ), + applicationPackageReferences: ( + body["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: body["applicationLicenses"], + taskSlotsPerNode: body["taskSlotsPerNode"], + taskSchedulingPolicy: !body.taskSchedulingPolicy + ? undefined + : { nodeFillType: body.taskSchedulingPolicy?.["nodeFillType"] }, + userAccounts: (body["userAccounts"] ?? []).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { loginMode: p.windowsUserConfiguration?.["loginMode"] }, + })), + metadata: (body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + mountConfiguration: (body["mountConfiguration"] ?? []).map((p) => ({ + azureBlobFileSystemConfiguration: !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.["accountName"], + containerName: + p.azureBlobFileSystemConfiguration?.["containerName"], + accountKey: p.azureBlobFileSystemConfiguration?.["accountKey"], + sasKey: p.azureBlobFileSystemConfiguration?.["sasKey"], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.["blobfuseOptions"], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.["relativeMountPath"], + identityReference: !p.azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration?.identityReference?.[ + "resourceId" + ], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? 
undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: !p.azureFileShareConfiguration + ? undefined + : { + accountName: p.azureFileShareConfiguration?.["accountName"], + azureFileUrl: p.azureFileShareConfiguration?.["azureFileUrl"], + accountKey: p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.["relativeMountPath"], + mountOptions: p.azureFileShareConfiguration?.["mountOptions"], + }, + })), + targetNodeCommunicationMode: body["targetNodeCommunicationMode"], + }, + }); +} + +export async function _createPoolDeserialize( + result: CreatePool201Response | CreatePoolDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * When naming Pools, avoid including sensitive information such as user names or + * secret project names. This information may appear in telemetry logs accessible + * to Microsoft Support engineers. + */ +export async function createPool( + context: Client, + body: BatchPoolCreateOptions, + options: CreatePoolOptions = { requestOptions: {} } +): Promise { + const result = await _createPoolSend(context, body, options); + return _createPoolDeserialize(result); +} + +export function _listPoolsSend( + context: Client, + options: ListPoolsOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _listPoolsDeserialize( + result: ListPools200Response | ListPoolsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + displayName: p["displayName"], + url: p["url"], + eTag: p["eTag"], + lastModified: + p["lastModified"] !== undefined + ? new Date(p["lastModified"]) + : undefined, + creationTime: + p["creationTime"] !== undefined + ? new Date(p["creationTime"]) + : undefined, + state: p["state"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + allocationState: p["allocationState"], + allocationStateTransitionTime: + p["allocationStateTransitionTime"] !== undefined + ? new Date(p["allocationStateTransitionTime"]) + : undefined, + vmSize: p["vmSize"], + cloudServiceConfiguration: !p.cloudServiceConfiguration + ? undefined + : { + osFamily: p.cloudServiceConfiguration?.["osFamily"], + osVersion: p.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !p.virtualMachineConfiguration + ? 
undefined + : { + imageReference: { + publisher: + p.virtualMachineConfiguration?.imageReference["publisher"], + offer: p.virtualMachineConfiguration?.imageReference["offer"], + sku: p.virtualMachineConfiguration?.imageReference["sku"], + version: p.virtualMachineConfiguration?.imageReference["version"], + virtualMachineImageId: + p.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + exactVersion: + p.virtualMachineConfiguration?.imageReference["exactVersion"], + }, + nodeAgentSKUId: p.virtualMachineConfiguration?.["nodeAgentSKUId"], + windowsConfiguration: !p.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + p.virtualMachineConfiguration?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: (p.virtualMachineConfiguration?.["dataDisks"] ?? []).map( + (p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + }) + ), + licenseType: p.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !p.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: p.virtualMachineConfiguration?.containerConfiguration?.[ + "type" + ], + containerImageNames: + p.virtualMachineConfiguration?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + p.virtualMachineConfiguration?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + }, + diskEncryptionConfiguration: !p.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + p.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.["targets"], + }, + nodePlacementConfiguration: !p.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + p.virtualMachineConfiguration?.nodePlacementConfiguration?.[ + "policy" + ], + }, + extensions: ( + p.virtualMachineConfiguration?.["extensions"] ?? [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: p["provisionAfterExtensions"], + })), + osDisk: !p.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !p.virtualMachineConfiguration + ?.osDisk?.ephemeralOSDiskSettings + ? undefined + : { + placement: + p.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings?.["placement"], + }, + }, + }, + resizeTimeout: p["resizeTimeout"], + resizeErrors: (p["resizeErrors"] ?? []).map((p) => ({ + code: p["code"], + message: p["message"], + values: (p["values"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + })), + currentDedicatedNodes: p["currentDedicatedNodes"], + currentLowPriorityNodes: p["currentLowPriorityNodes"], + targetDedicatedNodes: p["targetDedicatedNodes"], + targetLowPriorityNodes: p["targetLowPriorityNodes"], + enableAutoScale: p["enableAutoScale"], + autoScaleFormula: p["autoScaleFormula"], + autoScaleEvaluationInterval: p["autoScaleEvaluationInterval"], + autoScaleRun: !p.autoScaleRun + ? 
undefined + : { + timestamp: new Date(p.autoScaleRun?.["timestamp"]), + results: p.autoScaleRun?.["results"], + error: !p.autoScaleRun?.error + ? undefined + : { + code: p.autoScaleRun?.error?.["code"], + message: p.autoScaleRun?.error?.["message"], + values: (p.autoScaleRun?.error?.["values"] ?? []).map( + (p) => ({ name: p["name"], value: p["value"] }) + ), + }, + }, + enableInterNodeCommunication: p["enableInterNodeCommunication"], + networkConfiguration: !p.networkConfiguration + ? undefined + : { + subnetId: p.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + p.networkConfiguration?.["dynamicVNetAssignmentScope"], + endpointConfiguration: !p.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + p.networkConfiguration?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: p["frontendPortRangeStart"], + frontendPortRangeEnd: p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !p.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + p.networkConfiguration?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + p.networkConfiguration?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + p.networkConfiguration?.["enableAcceleratedNetworking"], + }, + startTask: !p.startTask + ? undefined + : { + commandLine: p.startTask?.["commandLine"], + containerSettings: !p.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.startTask?.containerSettings?.["containerRunOptions"], + imageName: p.startTask?.containerSettings?.["imageName"], + registry: !p.startTask?.containerSettings?.registry + ? undefined + : { + username: + p.startTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.startTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.startTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + p.startTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.startTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p.startTask?.["resourceFiles"] ?? []).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + p.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !p.startTask?.userIdentity + ? undefined + : { + username: p.startTask?.userIdentity?.["username"], + autoUser: !p.startTask?.userIdentity?.autoUser + ? 
undefined + : { + scope: p.startTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + p.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: p.startTask?.["maxTaskRetryCount"], + waitForSuccess: p.startTask?.["waitForSuccess"], + }, + certificateReferences: (p["certificateReferences"] ?? []).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + p["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: p["applicationLicenses"], + taskSlotsPerNode: p["taskSlotsPerNode"], + taskSchedulingPolicy: !p.taskSchedulingPolicy + ? undefined + : { nodeFillType: p.taskSchedulingPolicy?.["nodeFillType"] }, + userAccounts: (p["userAccounts"] ?? []).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { loginMode: p.windowsUserConfiguration?.["loginMode"] }, + })), + metadata: (p["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + stats: !p.stats + ? undefined + : { + url: p.stats?.["url"], + startTime: new Date(p.stats?.["startTime"]), + lastUpdateTime: new Date(p.stats?.["lastUpdateTime"]), + usageStats: !p.stats?.usageStats + ? undefined + : { + startTime: new Date(p.stats?.usageStats?.["startTime"]), + lastUpdateTime: new Date( + p.stats?.usageStats?.["lastUpdateTime"] + ), + dedicatedCoreTime: p.stats?.usageStats?.["dedicatedCoreTime"], + }, + resourceStats: !p.stats?.resourceStats + ? undefined + : { + startTime: new Date(p.stats?.resourceStats?.["startTime"]), + lastUpdateTime: new Date( + p.stats?.resourceStats?.["lastUpdateTime"] + ), + avgCPUPercentage: + p.stats?.resourceStats?.["avgCPUPercentage"], + avgMemoryGiB: p.stats?.resourceStats?.["avgMemoryGiB"], + peakMemoryGiB: p.stats?.resourceStats?.["peakMemoryGiB"], + avgDiskGiB: p.stats?.resourceStats?.["avgDiskGiB"], + peakDiskGiB: p.stats?.resourceStats?.["peakDiskGiB"], + diskReadIOps: p.stats?.resourceStats?.["diskReadIOps"], + diskWriteIOps: p.stats?.resourceStats?.["diskWriteIOps"], + diskReadGiB: p.stats?.resourceStats?.["diskReadGiB"], + diskWriteGiB: p.stats?.resourceStats?.["diskWriteGiB"], + networkReadGiB: p.stats?.resourceStats?.["networkReadGiB"], + networkWriteGiB: p.stats?.resourceStats?.["networkWriteGiB"], + }, + }, + mountConfiguration: (p["mountConfiguration"] ?? []).map((p) => ({ + azureBlobFileSystemConfiguration: !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: p.azureBlobFileSystemConfiguration?.["accountName"], + containerName: + p.azureBlobFileSystemConfiguration?.["containerName"], + accountKey: p.azureBlobFileSystemConfiguration?.["accountKey"], + sasKey: p.azureBlobFileSystemConfiguration?.["sasKey"], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.["blobfuseOptions"], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.["relativeMountPath"], + identityReference: !p.azureBlobFileSystemConfiguration + ?.identityReference + ? 
undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration?.identityReference?.[ + "resourceId" + ], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: !p.azureFileShareConfiguration + ? undefined + : { + accountName: p.azureFileShareConfiguration?.["accountName"], + azureFileUrl: p.azureFileShareConfiguration?.["azureFileUrl"], + accountKey: p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.["relativeMountPath"], + mountOptions: p.azureFileShareConfiguration?.["mountOptions"], + }, + })), + identity: !p.identity + ? undefined + : { + type: p.identity?.["type"], + userAssignedIdentities: ( + p.identity?.["userAssignedIdentities"] ?? [] + ).map((p) => ({ + resourceId: p["resourceId"], + clientId: p["clientId"], + principalId: p["principalId"], + })), + }, + targetNodeCommunicationMode: p["targetNodeCommunicationMode"], + currentNodeCommunicationMode: p["currentNodeCommunicationMode"], + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists all of the Pools in the specified Account. */ +export async function listPools( + context: Client, + options: ListPoolsOptions = { requestOptions: {} } +): Promise { + const result = await _listPoolsSend(context, options); + return _listPoolsDeserialize(result); +} + +export function _deletePoolSend( + context: Client, + poolId: string, + options: DeletePoolOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}", poolId) + .delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _deletePoolDeserialize( + result: DeletePool202Response | DeletePoolDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * When you request that a Pool be deleted, the following actions occur: the Pool + * state is set to deleting; any ongoing resize operation on the Pool are stopped; + * the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks + * running on existing Compute Nodes are terminated and requeued (as if a resize + * Pool operation had been requested with the default requeue option); finally, + * the Pool is removed from the system. Because running Tasks are requeued, the + * user can rerun these Tasks by updating their Job to target a different Pool. + * The Tasks can then run on the new Pool. 
If you want to override the requeue + * behavior, then you should call resize Pool explicitly to shrink the Pool to + * zero size before deleting the Pool. If you call an Update, Patch or Delete API + * on a Pool in the deleting state, it will fail with HTTP status code 409 with + * error code PoolBeingDeleted. + */ +export async function deletePool( + context: Client, + poolId: string, + options: DeletePoolOptions = { requestOptions: {} } +): Promise { + const result = await _deletePoolSend(context, poolId, options); + return _deletePoolDeserialize(result); +} + +export function _poolExistsSend( + context: Client, + poolId: string, + options: PoolExistsOptions = { requestOptions: {} } +): StreamableMethod< + PoolExists200Response | PoolExists404Response | PoolExistsDefaultResponse +> { + return context + .path("/pools/{poolId}", poolId) + .head({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _poolExistsDeserialize( + result: + | PoolExists200Response + | PoolExists404Response + | PoolExistsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Gets basic properties of a Pool. */ +export async function poolExists( + context: Client, + poolId: string, + options: PoolExistsOptions = { requestOptions: {} } +): Promise { + const result = await _poolExistsSend(context, poolId, options); + return _poolExistsDeserialize(result); +} + +export function _getPoolSend( + context: Client, + poolId: string, + options: GetPoolOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}", poolId) + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { + timeOut: options?.timeOut, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _getPoolDeserialize( + result: GetPool200Response | GetPoolDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + displayName: result.body["displayName"], + url: result.body["url"], + eTag: result.body["eTag"], + lastModified: + result.body["lastModified"] !== undefined + ? new Date(result.body["lastModified"]) + : undefined, + creationTime: + result.body["creationTime"] !== undefined + ? new Date(result.body["creationTime"]) + : undefined, + state: result.body["state"], + stateTransitionTime: + result.body["stateTransitionTime"] !== undefined + ? 
new Date(result.body["stateTransitionTime"]) + : undefined, + allocationState: result.body["allocationState"], + allocationStateTransitionTime: + result.body["allocationStateTransitionTime"] !== undefined + ? new Date(result.body["allocationStateTransitionTime"]) + : undefined, + vmSize: result.body["vmSize"], + cloudServiceConfiguration: !result.body.cloudServiceConfiguration + ? undefined + : { + osFamily: result.body.cloudServiceConfiguration?.["osFamily"], + osVersion: result.body.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !result.body.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + result.body.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + result.body.virtualMachineConfiguration?.imageReference["offer"], + sku: result.body.virtualMachineConfiguration?.imageReference["sku"], + version: + result.body.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + result.body.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + exactVersion: + result.body.virtualMachineConfiguration?.imageReference[ + "exactVersion" + ], + }, + nodeAgentSKUId: + result.body.virtualMachineConfiguration?.["nodeAgentSKUId"], + windowsConfiguration: !result.body.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + result.body.virtualMachineConfiguration + ?.windowsConfiguration?.["enableAutomaticUpdates"], + }, + dataDisks: ( + result.body.virtualMachineConfiguration?.["dataDisks"] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: result.body.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !result.body.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: result.body.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + result.body.virtualMachineConfiguration + ?.containerConfiguration?.["containerImageNames"], + containerRegistries: ( + result.body.virtualMachineConfiguration + ?.containerConfiguration?.["containerRegistries"] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + }, + diskEncryptionConfiguration: !result.body.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + result.body.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.["targets"], + }, + nodePlacementConfiguration: !result.body.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + result.body.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + result.body.virtualMachineConfiguration?.["extensions"] ?? [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: p["provisionAfterExtensions"], + })), + osDisk: !result.body.virtualMachineConfiguration?.osDisk + ? 
undefined + : { + ephemeralOSDiskSettings: !result.body + .virtualMachineConfiguration?.osDisk?.ephemeralOSDiskSettings + ? undefined + : { + placement: + result.body.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings?.["placement"], + }, + }, + }, + resizeTimeout: result.body["resizeTimeout"], + resizeErrors: (result.body["resizeErrors"] ?? []).map((p) => ({ + code: p["code"], + message: p["message"], + values: (p["values"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + })), + currentDedicatedNodes: result.body["currentDedicatedNodes"], + currentLowPriorityNodes: result.body["currentLowPriorityNodes"], + targetDedicatedNodes: result.body["targetDedicatedNodes"], + targetLowPriorityNodes: result.body["targetLowPriorityNodes"], + enableAutoScale: result.body["enableAutoScale"], + autoScaleFormula: result.body["autoScaleFormula"], + autoScaleEvaluationInterval: result.body["autoScaleEvaluationInterval"], + autoScaleRun: !result.body.autoScaleRun + ? undefined + : { + timestamp: new Date(result.body.autoScaleRun?.["timestamp"]), + results: result.body.autoScaleRun?.["results"], + error: !result.body.autoScaleRun?.error + ? undefined + : { + code: result.body.autoScaleRun?.error?.["code"], + message: result.body.autoScaleRun?.error?.["message"], + values: (result.body.autoScaleRun?.error?.["values"] ?? []).map( + (p) => ({ name: p["name"], value: p["value"] }) + ), + }, + }, + enableInterNodeCommunication: result.body["enableInterNodeCommunication"], + networkConfiguration: !result.body.networkConfiguration + ? undefined + : { + subnetId: result.body.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + result.body.networkConfiguration?.["dynamicVNetAssignmentScope"], + endpointConfiguration: !result.body.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + result.body.networkConfiguration?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: p["frontendPortRangeStart"], + frontendPortRangeEnd: p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !result.body.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + result.body.networkConfiguration + ?.publicIPAddressConfiguration?.["provision"], + ipAddressIds: + result.body.networkConfiguration + ?.publicIPAddressConfiguration?.["ipAddressIds"], + }, + enableAcceleratedNetworking: + result.body.networkConfiguration?.["enableAcceleratedNetworking"], + }, + startTask: !result.body.startTask + ? undefined + : { + commandLine: result.body.startTask?.["commandLine"], + containerSettings: !result.body.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + result.body.startTask?.containerSettings?.["imageName"], + registry: !result.body.startTask?.containerSettings?.registry + ? 
undefined + : { + username: + result.body.startTask?.containerSettings?.registry?.[ + "username" + ], + password: + result.body.startTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + result.body.startTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !result.body.startTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + result.body.startTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: (result.body.startTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + result.body.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !result.body.startTask?.userIdentity + ? undefined + : { + username: result.body.startTask?.userIdentity?.["username"], + autoUser: !result.body.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.startTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + result.body.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: result.body.startTask?.["maxTaskRetryCount"], + waitForSuccess: result.body.startTask?.["waitForSuccess"], + }, + certificateReferences: (result.body["certificateReferences"] ?? []).map( + (p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + }) + ), + applicationPackageReferences: ( + result.body["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: result.body["applicationLicenses"], + taskSlotsPerNode: result.body["taskSlotsPerNode"], + taskSchedulingPolicy: !result.body.taskSchedulingPolicy + ? undefined + : { nodeFillType: result.body.taskSchedulingPolicy?.["nodeFillType"] }, + userAccounts: (result.body["userAccounts"] ?? []).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { loginMode: p.windowsUserConfiguration?.["loginMode"] }, + })), + metadata: (result.body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + stats: !result.body.stats + ? undefined + : { + url: result.body.stats?.["url"], + startTime: new Date(result.body.stats?.["startTime"]), + lastUpdateTime: new Date(result.body.stats?.["lastUpdateTime"]), + usageStats: !result.body.stats?.usageStats + ? 
undefined + : { + startTime: new Date( + result.body.stats?.usageStats?.["startTime"] + ), + lastUpdateTime: new Date( + result.body.stats?.usageStats?.["lastUpdateTime"] + ), + dedicatedCoreTime: + result.body.stats?.usageStats?.["dedicatedCoreTime"], + }, + resourceStats: !result.body.stats?.resourceStats + ? undefined + : { + startTime: new Date( + result.body.stats?.resourceStats?.["startTime"] + ), + lastUpdateTime: new Date( + result.body.stats?.resourceStats?.["lastUpdateTime"] + ), + avgCPUPercentage: + result.body.stats?.resourceStats?.["avgCPUPercentage"], + avgMemoryGiB: + result.body.stats?.resourceStats?.["avgMemoryGiB"], + peakMemoryGiB: + result.body.stats?.resourceStats?.["peakMemoryGiB"], + avgDiskGiB: result.body.stats?.resourceStats?.["avgDiskGiB"], + peakDiskGiB: result.body.stats?.resourceStats?.["peakDiskGiB"], + diskReadIOps: + result.body.stats?.resourceStats?.["diskReadIOps"], + diskWriteIOps: + result.body.stats?.resourceStats?.["diskWriteIOps"], + diskReadGiB: result.body.stats?.resourceStats?.["diskReadGiB"], + diskWriteGiB: + result.body.stats?.resourceStats?.["diskWriteGiB"], + networkReadGiB: + result.body.stats?.resourceStats?.["networkReadGiB"], + networkWriteGiB: + result.body.stats?.resourceStats?.["networkWriteGiB"], + }, + }, + mountConfiguration: (result.body["mountConfiguration"] ?? []).map((p) => ({ + azureBlobFileSystemConfiguration: !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: p.azureBlobFileSystemConfiguration?.["accountName"], + containerName: + p.azureBlobFileSystemConfiguration?.["containerName"], + accountKey: p.azureBlobFileSystemConfiguration?.["accountKey"], + sasKey: p.azureBlobFileSystemConfiguration?.["sasKey"], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.["blobfuseOptions"], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.["relativeMountPath"], + identityReference: !p.azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration?.identityReference?.[ + "resourceId" + ], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: !p.azureFileShareConfiguration + ? undefined + : { + accountName: p.azureFileShareConfiguration?.["accountName"], + azureFileUrl: p.azureFileShareConfiguration?.["azureFileUrl"], + accountKey: p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.["relativeMountPath"], + mountOptions: p.azureFileShareConfiguration?.["mountOptions"], + }, + })), + identity: !result.body.identity + ? undefined + : { + type: result.body.identity?.["type"], + userAssignedIdentities: ( + result.body.identity?.["userAssignedIdentities"] ?? 
[] + ).map((p) => ({ + resourceId: p["resourceId"], + clientId: p["clientId"], + principalId: p["principalId"], + })), + }, + targetNodeCommunicationMode: result.body["targetNodeCommunicationMode"], + currentNodeCommunicationMode: result.body["currentNodeCommunicationMode"], + }; +} + +/** Gets information about the specified Pool. */ +export async function getPool( + context: Client, + poolId: string, + options: GetPoolOptions = { requestOptions: {} } +): Promise { + const result = await _getPoolSend(context, poolId, options); + return _getPoolDeserialize(result); +} + +export function _updatePoolSend( + context: Client, + poolId: string, + body: BatchPoolUpdateOptions, + options: UpdatePoolOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}", poolId) + .patch({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + startTask: !body.startTask + ? undefined + : { + commandLine: body.startTask?.["commandLine"], + containerSettings: !body.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: body.startTask?.containerSettings?.["imageName"], + registry: !body.startTask?.containerSettings?.registry + ? undefined + : { + username: + body.startTask?.containerSettings?.registry?.[ + "username" + ], + password: + body.startTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + body.startTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.startTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.startTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (body.startTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + body.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !body.startTask?.userIdentity + ? undefined + : { + username: body.startTask?.userIdentity?.["username"], + autoUser: !body.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: + body.startTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: body.startTask?.["maxTaskRetryCount"], + waitForSuccess: body.startTask?.["waitForSuccess"], + }, + certificateReferences: (body["certificateReferences"] ?? 
[]).map( + (p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + }) + ), + applicationPackageReferences: ( + body["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + metadata: (body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + targetNodeCommunicationMode: body["targetNodeCommunicationMode"], + }, + }); +} + +export async function _updatePoolDeserialize( + result: UpdatePool200Response | UpdatePoolDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This only replaces the Pool properties specified in the request. For example, + * if the Pool has a StartTask associated with it, and a request does not specify + * a StartTask element, then the Pool keeps the existing StartTask. + */ +export async function updatePool( + context: Client, + poolId: string, + body: BatchPoolUpdateOptions, + options: UpdatePoolOptions = { requestOptions: {} } +): Promise { + const result = await _updatePoolSend(context, poolId, body, options); + return _updatePoolDeserialize(result); +} + +export function _disablePoolAutoScaleSend( + context: Client, + poolId: string, + options: DisablePoolAutoScaleOptions = { requestOptions: {} } +): StreamableMethod< + DisablePoolAutoScale200Response | DisablePoolAutoScaleDefaultResponse +> { + return context + .path("/pools/{poolId}/disableautoscale", poolId) + .post({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _disablePoolAutoScaleDeserialize( + result: DisablePoolAutoScale200Response | DisablePoolAutoScaleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Disables automatic scaling for a Pool. */ +export async function disablePoolAutoScale( + context: Client, + poolId: string, + options: DisablePoolAutoScaleOptions = { requestOptions: {} } +): Promise { + const result = await _disablePoolAutoScaleSend(context, poolId, options); + return _disablePoolAutoScaleDeserialize(result); +} + +export function _enablePoolAutoScaleSend( + context: Client, + poolId: string, + body: BatchPoolEnableAutoScaleOptions, + options: EnablePoolAutoScaleOptions = { requestOptions: {} } +): StreamableMethod< + EnablePoolAutoScale200Response | EnablePoolAutoScaleDefaultResponse +> { + return context + .path("/pools/{poolId}/enableautoscale", poolId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? 
{ "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + autoScaleFormula: body["autoScaleFormula"], + autoScaleEvaluationInterval: body["autoScaleEvaluationInterval"], + }, + }); +} + +export async function _enablePoolAutoScaleDeserialize( + result: EnablePoolAutoScale200Response | EnablePoolAutoScaleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You cannot enable automatic scaling on a Pool if a resize operation is in + * progress on the Pool. If automatic scaling of the Pool is currently disabled, + * you must specify a valid autoscale formula as part of the request. If automatic + * scaling of the Pool is already enabled, you may specify a new autoscale formula + * and/or a new evaluation interval. You cannot call this API for the same Pool + * more than once every 30 seconds. + */ +export async function enablePoolAutoScale( + context: Client, + poolId: string, + body: BatchPoolEnableAutoScaleOptions, + options: EnablePoolAutoScaleOptions = { requestOptions: {} } +): Promise { + const result = await _enablePoolAutoScaleSend(context, poolId, body, options); + return _enablePoolAutoScaleDeserialize(result); +} + +export function _evaluatePoolAutoScaleSend( + context: Client, + poolId: string, + body: BatchPoolEvaluateAutoScaleOptions, + options: EvaluatePoolAutoScaleOptions = { requestOptions: {} } +): StreamableMethod< + EvaluatePoolAutoScale200Response | EvaluatePoolAutoScaleDefaultResponse +> { + return context + .path("/pools/{poolId}/evaluateautoscale", poolId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { autoScaleFormula: body["autoScaleFormula"] }, + }); +} + +export async function _evaluatePoolAutoScaleDeserialize( + result: + | EvaluatePoolAutoScale200Response + | EvaluatePoolAutoScaleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + timestamp: new Date(result.body["timestamp"]), + results: result.body["results"], + error: !result.body.error + ? undefined + : { + code: result.body.error?.["code"], + message: result.body.error?.["message"], + values: (result.body.error?.["values"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }; +} + +/** + * This API is primarily for validating an autoscale formula, as it simply returns + * the result without applying the formula to the Pool. The Pool must have auto + * scaling enabled in order to evaluate a formula. + */ +export async function evaluatePoolAutoScale( + context: Client, + poolId: string, + body: BatchPoolEvaluateAutoScaleOptions, + options: EvaluatePoolAutoScaleOptions = { requestOptions: {} } +): Promise { + const result = await _evaluatePoolAutoScaleSend( + context, + poolId, + body, + options + ); + return _evaluatePoolAutoScaleDeserialize(result); +} + +export function _resizePoolSend( + context: Client, + poolId: string, + body: BatchPoolResizeOptions, + options: ResizePoolOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/resize", poolId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? 
{ "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + targetDedicatedNodes: body["targetDedicatedNodes"], + targetLowPriorityNodes: body["targetLowPriorityNodes"], + resizeTimeout: body["resizeTimeout"], + nodeDeallocationOption: body["nodeDeallocationOption"], + }, + }); +} + +export async function _resizePoolDeserialize( + result: ResizePool202Response | ResizePoolDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You can only resize a Pool when its allocation state is steady. If the Pool is + * already resizing, the request fails with status code 409. When you resize a + * Pool, the Pool's allocation state changes from steady to resizing. You cannot + * resize Pools which are configured for automatic scaling. If you try to do this, + * the Batch service returns an error 409. If you resize a Pool downwards, the + * Batch service chooses which Compute Nodes to remove. To remove specific Compute + * Nodes, use the Pool remove Compute Nodes API instead. + */ +export async function resizePool( + context: Client, + poolId: string, + body: BatchPoolResizeOptions, + options: ResizePoolOptions = { requestOptions: {} } +): Promise { + const result = await _resizePoolSend(context, poolId, body, options); + return _resizePoolDeserialize(result); +} + +export function _stopPoolResizeSend( + context: Client, + poolId: string, + options: StopPoolResizeOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/stopresize", poolId) + .post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _stopPoolResizeDeserialize( + result: StopPoolResize202Response | StopPoolResizeDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This does not restore the Pool to its previous state before the resize + * operation: it only stops any further changes being made, and the Pool maintains + * its current state. After stopping, the Pool stabilizes at the number of Compute + * Nodes it was at when the stop operation was done. During the stop operation, + * the Pool allocation state changes first to stopping and then to steady. A + * resize operation need not be an explicit resize Pool request; this API can also + * be used to halt the initial sizing of the Pool when it is created. 
+ */ +export async function stopPoolResize( + context: Client, + poolId: string, + options: StopPoolResizeOptions = { requestOptions: {} } +): Promise { + const result = await _stopPoolResizeSend(context, poolId, options); + return _stopPoolResizeDeserialize(result); +} + +export function _replacePoolPropertiesSend( + context: Client, + poolId: string, + body: BatchPoolReplaceOptions, + options: ReplacePoolPropertiesOptions = { requestOptions: {} } +): StreamableMethod< + ReplacePoolProperties204Response | ReplacePoolPropertiesDefaultResponse +> { + return context + .path("/pools/{poolId}/updateproperties", poolId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + startTask: !body.startTask + ? undefined + : { + commandLine: body.startTask?.["commandLine"], + containerSettings: !body.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: body.startTask?.containerSettings?.["imageName"], + registry: !body.startTask?.containerSettings?.registry + ? undefined + : { + username: + body.startTask?.containerSettings?.registry?.[ + "username" + ], + password: + body.startTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + body.startTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.startTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.startTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (body.startTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + body.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !body.startTask?.userIdentity + ? undefined + : { + username: body.startTask?.userIdentity?.["username"], + autoUser: !body.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: + body.startTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: body.startTask?.["maxTaskRetryCount"], + waitForSuccess: body.startTask?.["waitForSuccess"], + }, + certificateReferences: (body["certificateReferences"] ?? []).map( + (p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + }) + ), + applicationPackageReferences: ( + body["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + metadata: (body["metadata"] ?? 
[]).map((p) => ({ + name: p["name"], + value: p["value"], + })), + targetNodeCommunicationMode: body["targetNodeCommunicationMode"], + }, + }); +} + +export async function _replacePoolPropertiesDeserialize( + result: + | ReplacePoolProperties204Response + | ReplacePoolPropertiesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This fully replaces all the updatable properties of the Pool. For example, if + * the Pool has a StartTask associated with it and if StartTask is not specified + * with this request, then the Batch service will remove the existing StartTask. + */ +export async function replacePoolProperties( + context: Client, + poolId: string, + body: BatchPoolReplaceOptions, + options: ReplacePoolPropertiesOptions = { requestOptions: {} } +): Promise { + const result = await _replacePoolPropertiesSend( + context, + poolId, + body, + options + ); + return _replacePoolPropertiesDeserialize(result); +} + +export function _removeNodesSend( + context: Client, + poolId: string, + body: NodeRemoveOptions, + options: RemoveNodesOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/removenodes", poolId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + nodeList: body["nodeList"], + resizeTimeout: body["resizeTimeout"], + nodeDeallocationOption: body["nodeDeallocationOption"], + }, + }); +} + +export async function _removeNodesDeserialize( + result: RemoveNodes202Response | RemoveNodesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This operation can only run when the allocation state of the Pool is steady. + * When this operation runs, the allocation state changes from steady to resizing. + * Each request may remove up to 100 nodes. + */ +export async function removeNodes( + context: Client, + poolId: string, + body: NodeRemoveOptions, + options: RemoveNodesOptions = { requestOptions: {} } +): Promise { + const result = await _removeNodesSend(context, poolId, body, options); + return _removeNodesDeserialize(result); +} + +export function _listSupportedImagesSend( + context: Client, + options: ListSupportedImagesOptions = { requestOptions: {} } +): StreamableMethod< + ListSupportedImages200Response | ListSupportedImagesDefaultResponse +> { + return context + .path("/supportedimages") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + }, + }); +} + +export async function _listSupportedImagesDeserialize( + result: ListSupportedImages200Response | ListSupportedImagesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? 
[]).map((p) => ({ + nodeAgentSKUId: p["nodeAgentSKUId"], + imageReference: { + publisher: p.imageReference["publisher"], + offer: p.imageReference["offer"], + sku: p.imageReference["sku"], + version: p.imageReference["version"], + virtualMachineImageId: p.imageReference["virtualMachineImageId"], + exactVersion: p.imageReference["exactVersion"], + }, + osType: p["osType"], + capabilities: p["capabilities"], + batchSupportEndOfLife: + p["batchSupportEndOfLife"] !== undefined + ? new Date(p["batchSupportEndOfLife"]) + : undefined, + verificationType: p["verificationType"], + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists all Virtual Machine Images supported by the Azure Batch service. */ +export async function listSupportedImages( + context: Client, + options: ListSupportedImagesOptions = { requestOptions: {} } +): Promise { + const result = await _listSupportedImagesSend(context, options); + return _listSupportedImagesDeserialize(result); +} + +export function _listPoolNodeCountsSend( + context: Client, + options: ListPoolNodeCountsOptions = { requestOptions: {} } +): StreamableMethod< + ListPoolNodeCounts200Response | ListPoolNodeCountsDefaultResponse +> { + return context + .path("/nodecounts") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + }, + }); +} + +export async function _listPoolNodeCountsDeserialize( + result: ListPoolNodeCounts200Response | ListPoolNodeCountsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + poolId: p["poolId"], + dedicated: !p.dedicated + ? undefined + : { + creating: p.dedicated?.["creating"], + idle: p.dedicated?.["idle"], + offline: p.dedicated?.["offline"], + preempted: p.dedicated?.["preempted"], + rebooting: p.dedicated?.["rebooting"], + reimaging: p.dedicated?.["reimaging"], + running: p.dedicated?.["running"], + starting: p.dedicated?.["starting"], + startTaskFailed: p.dedicated?.["startTaskFailed"], + leavingPool: p.dedicated?.["leavingPool"], + unknown: p.dedicated?.["unknown"], + unusable: p.dedicated?.["unusable"], + waitingForStartTask: p.dedicated?.["waitingForStartTask"], + total: p.dedicated?.["total"], + }, + lowPriority: !p.lowPriority + ? undefined + : { + creating: p.lowPriority?.["creating"], + idle: p.lowPriority?.["idle"], + offline: p.lowPriority?.["offline"], + preempted: p.lowPriority?.["preempted"], + rebooting: p.lowPriority?.["rebooting"], + reimaging: p.lowPriority?.["reimaging"], + running: p.lowPriority?.["running"], + starting: p.lowPriority?.["starting"], + startTaskFailed: p.lowPriority?.["startTaskFailed"], + leavingPool: p.lowPriority?.["leavingPool"], + unknown: p.lowPriority?.["unknown"], + unusable: p.lowPriority?.["unusable"], + waitingForStartTask: p.lowPriority?.["waitingForStartTask"], + total: p.lowPriority?.["total"], + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** + * Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + * numbers returned may not always be up to date. If you need exact node counts, + * use a list query. 
+ */ +export async function listPoolNodeCounts( + context: Client, + options: ListPoolNodeCountsOptions = { requestOptions: {} } +): Promise { + const result = await _listPoolNodeCountsSend(context, options); + return _listPoolNodeCountsDeserialize(result); +} + +export function _deleteJobSend( + context: Client, + jobId: string, + options: DeleteJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}", jobId) + .delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _deleteJobDeserialize( + result: DeleteJob202Response | DeleteJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * Deleting a Job also deletes all Tasks that are part of that Job, and all Job + * statistics. This also overrides the retention period for Task data; that is, if + * the Job contains Tasks which are still retained on Compute Nodes, the Batch + * services deletes those Tasks' working directories and all their contents. When + * a Delete Job request is received, the Batch service sets the Job to the + * deleting state. All update operations on a Job that is in deleting state will + * fail with status code 409 (Conflict), with additional information indicating + * that the Job is being deleted. + */ +export async function deleteJob( + context: Client, + jobId: string, + options: DeleteJobOptions = { requestOptions: {} } +): Promise { + const result = await _deleteJobSend(context, jobId, options); + return _deleteJobDeserialize(result); +} + +export function _getJobSend( + context: Client, + jobId: string, + options: GetJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}", jobId) + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { + timeOut: options?.timeOut, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _getJobDeserialize( + result: GetJob200Response | GetJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + displayName: result.body["displayName"], + usesTaskDependencies: result.body["usesTaskDependencies"], + url: result.body["url"], + eTag: result.body["eTag"], + lastModified: + result.body["lastModified"] !== undefined + ? new Date(result.body["lastModified"]) + : undefined, + creationTime: + result.body["creationTime"] !== undefined + ? 
new Date(result.body["creationTime"]) + : undefined, + state: result.body["state"], + stateTransitionTime: + result.body["stateTransitionTime"] !== undefined + ? new Date(result.body["stateTransitionTime"]) + : undefined, + previousState: result.body["previousState"], + previousStateTransitionTime: + result.body["previousStateTransitionTime"] !== undefined + ? new Date(result.body["previousStateTransitionTime"]) + : undefined, + priority: result.body["priority"], + allowTaskPreemption: result.body["allowTaskPreemption"], + maxParallelTasks: result.body["maxParallelTasks"], + constraints: !result.body.constraints + ? undefined + : { + maxWallClockTime: result.body.constraints?.["maxWallClockTime"], + maxTaskRetryCount: result.body.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !result.body.jobManagerTask + ? undefined + : { + id: result.body.jobManagerTask?.["id"], + displayName: result.body.jobManagerTask?.["displayName"], + commandLine: result.body.jobManagerTask?.["commandLine"], + containerSettings: !result.body.jobManagerTask?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.jobManagerTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + result.body.jobManagerTask?.containerSettings?.["imageName"], + registry: !result.body.jobManagerTask?.containerSettings + ?.registry + ? undefined + : { + username: + result.body.jobManagerTask?.containerSettings + ?.registry?.["username"], + password: + result.body.jobManagerTask?.containerSettings + ?.registry?.["password"], + registryServer: + result.body.jobManagerTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !result.body.jobManagerTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + result.body.jobManagerTask?.containerSettings + ?.registry?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.jobManagerTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + result.body.jobManagerTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: (result.body.jobManagerTask?.["outputFiles"] ?? []).map( + (p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + }) + ), + environmentSettings: ( + result.body.jobManagerTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !result.body.jobManagerTask?.constraints + ? 
undefined + : { + maxWallClockTime: + result.body.jobManagerTask?.constraints?.["maxWallClockTime"], + retentionTime: + result.body.jobManagerTask?.constraints?.["retentionTime"], + maxTaskRetryCount: + result.body.jobManagerTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + requiredSlots: result.body.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: + result.body.jobManagerTask?.["killJobOnCompletion"], + userIdentity: !result.body.jobManagerTask?.userIdentity + ? undefined + : { + username: + result.body.jobManagerTask?.userIdentity?.["username"], + autoUser: !result.body.jobManagerTask?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.jobManagerTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + result.body.jobManagerTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + runExclusive: result.body.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + result.body.jobManagerTask?.["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !result.body.jobManagerTask + ?.authenticationTokenSettings + ? undefined + : { + access: + result.body.jobManagerTask?.authenticationTokenSettings?.[ + "access" + ], + }, + allowLowPriorityNode: + result.body.jobManagerTask?.["allowLowPriorityNode"], + }, + jobPreparationTask: !result.body.jobPreparationTask + ? undefined + : { + id: result.body.jobPreparationTask?.["id"], + commandLine: result.body.jobPreparationTask?.["commandLine"], + containerSettings: !result.body.jobPreparationTask?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.jobPreparationTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + result.body.jobPreparationTask?.containerSettings?.[ + "imageName" + ], + registry: !result.body.jobPreparationTask?.containerSettings + ?.registry + ? undefined + : { + username: + result.body.jobPreparationTask?.containerSettings + ?.registry?.["username"], + password: + result.body.jobPreparationTask?.containerSettings + ?.registry?.["password"], + registryServer: + result.body.jobPreparationTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !result.body.jobPreparationTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + result.body.jobPreparationTask?.containerSettings + ?.registry?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.jobPreparationTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + result.body.jobPreparationTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + result.body.jobPreparationTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !result.body.jobPreparationTask?.constraints + ? 
undefined + : { + maxWallClockTime: + result.body.jobPreparationTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + result.body.jobPreparationTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + result.body.jobPreparationTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + waitForSuccess: result.body.jobPreparationTask?.["waitForSuccess"], + userIdentity: !result.body.jobPreparationTask?.userIdentity + ? undefined + : { + username: + result.body.jobPreparationTask?.userIdentity?.["username"], + autoUser: !result.body.jobPreparationTask?.userIdentity + ?.autoUser + ? undefined + : { + scope: + result.body.jobPreparationTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + result.body.jobPreparationTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + rerunOnNodeRebootAfterSuccess: + result.body.jobPreparationTask?.["rerunOnNodeRebootAfterSuccess"], + }, + jobReleaseTask: !result.body.jobReleaseTask + ? undefined + : { + id: result.body.jobReleaseTask?.["id"], + commandLine: result.body.jobReleaseTask?.["commandLine"], + containerSettings: !result.body.jobReleaseTask?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.jobReleaseTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + result.body.jobReleaseTask?.containerSettings?.["imageName"], + registry: !result.body.jobReleaseTask?.containerSettings + ?.registry + ? undefined + : { + username: + result.body.jobReleaseTask?.containerSettings + ?.registry?.["username"], + password: + result.body.jobReleaseTask?.containerSettings + ?.registry?.["password"], + registryServer: + result.body.jobReleaseTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !result.body.jobReleaseTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + result.body.jobReleaseTask?.containerSettings + ?.registry?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.jobReleaseTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + result.body.jobReleaseTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + result.body.jobReleaseTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: result.body.jobReleaseTask?.["maxWallClockTime"], + retentionTime: result.body.jobReleaseTask?.["retentionTime"], + userIdentity: !result.body.jobReleaseTask?.userIdentity + ? undefined + : { + username: + result.body.jobReleaseTask?.userIdentity?.["username"], + autoUser: !result.body.jobReleaseTask?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.jobReleaseTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + result.body.jobReleaseTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + }, + commonEnvironmentSettings: ( + result.body["commonEnvironmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + poolInfo: { + poolId: result.body.poolInfo["poolId"], + autoPoolSpecification: !result.body.poolInfo.autoPoolSpecification + ? 
undefined + : { + autoPoolIdPrefix: + result.body.poolInfo.autoPoolSpecification?.["autoPoolIdPrefix"], + poolLifetimeOption: + result.body.poolInfo.autoPoolSpecification?.[ + "poolLifetimeOption" + ], + keepAlive: + result.body.poolInfo.autoPoolSpecification?.["keepAlive"], + pool: !result.body.poolInfo.autoPoolSpecification?.pool + ? undefined + : { + displayName: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "displayName" + ], + vmSize: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "vmSize" + ], + cloudServiceConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool?.cloudServiceConfiguration + ? undefined + : { + osFamily: + result.body.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + result.body.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "offer" + ], + sku: result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + exactVersion: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "exactVersion" + ], + }, + nodeAgentSKUId: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["nodeAgentSKUId"], + windowsConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["dataDisks"] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.containerConfiguration + ? undefined + : { + type: result.body.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? 
undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + }, + diskEncryptionConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.["targets"], + }, + nodePlacementConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + result.body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["extensions"] ?? [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !result.body.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !result.body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + result.body.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.osDisk?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "taskSlotsPerNode" + ], + taskSchedulingPolicy: !result.body.poolInfo + .autoPoolSpecification?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + result.body.poolInfo.autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "resizeTimeout" + ], + targetDedicatedNodes: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "targetDedicatedNodes" + ], + targetLowPriorityNodes: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "enableAutoScale" + ], + autoScaleFormula: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleFormula" + ], + autoScaleEvaluationInterval: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ? undefined + : { + subnetId: + result.body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + result.body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + result.body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? 
[] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !result.body.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + result.body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.["provision"], + ipAddressIds: + result.body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + result.body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask + ? undefined + : { + commandLine: + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["commandLine"], + containerSettings: !result.body.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.["imageName"], + registry: !result.body.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ? undefined + : { + username: + result.body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["username"], + password: + result.body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + result.body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !result.body.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + result.body.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !result.body.poolInfo + .autoPoolSpecification?.pool?.startTask?.userIdentity + ? 
undefined + : { + username: + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.["username"], + autoUser: !result.body.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + result.body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + maxTaskRetryCount: + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["maxTaskRetryCount"], + waitForSuccess: + result.body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["waitForSuccess"], + }, + certificateReferences: ( + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "applicationLicenses" + ], + userAccounts: ( + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "userAccounts" + ] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "metadata" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "mountConfiguration" + ] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.["sasKey"], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? 
undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.["accountName"], + azureFileUrl: + p.azureFileShareConfiguration?.["azureFileUrl"], + accountKey: + p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.["mountOptions"], + }, + })), + targetNodeCommunicationMode: + result.body.poolInfo.autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + onAllTasksComplete: result.body["onAllTasksComplete"], + onTaskFailure: result.body["onTaskFailure"], + networkConfiguration: !result.body.networkConfiguration + ? undefined + : { subnetId: result.body.networkConfiguration?.["subnetId"] }, + metadata: (result.body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + executionInfo: !result.body.executionInfo + ? undefined + : { + startTime: new Date(result.body.executionInfo?.["startTime"]), + endTime: + result.body.executionInfo?.["endTime"] !== undefined + ? new Date(result.body.executionInfo?.["endTime"]) + : undefined, + poolId: result.body.executionInfo?.["poolId"], + schedulingError: !result.body.executionInfo?.schedulingError + ? undefined + : { + category: + result.body.executionInfo?.schedulingError?.["category"], + code: result.body.executionInfo?.schedulingError?.["code"], + message: + result.body.executionInfo?.schedulingError?.["message"], + details: ( + result.body.executionInfo?.schedulingError?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + terminateReason: result.body.executionInfo?.["terminateReason"], + }, + stats: !result.body.stats + ? undefined + : { + url: result.body.stats?.["url"], + startTime: new Date(result.body.stats?.["startTime"]), + lastUpdateTime: new Date(result.body.stats?.["lastUpdateTime"]), + userCPUTime: result.body.stats?.["userCPUTime"], + kernelCPUTime: result.body.stats?.["kernelCPUTime"], + wallClockTime: result.body.stats?.["wallClockTime"], + readIOps: result.body.stats?.["readIOps"], + writeIOps: result.body.stats?.["writeIOps"], + readIOGiB: result.body.stats?.["readIOGiB"], + writeIOGiB: result.body.stats?.["writeIOGiB"], + numSucceededTasks: result.body.stats?.["numSucceededTasks"], + numFailedTasks: result.body.stats?.["numFailedTasks"], + numTaskRetries: result.body.stats?.["numTaskRetries"], + waitTime: result.body.stats?.["waitTime"], + }, + }; +} + +/** Gets information about the specified Job. */ +export async function getJob( + context: Client, + jobId: string, + options: GetJobOptions = { requestOptions: {} } +): Promise { + const result = await _getJobSend(context, jobId, options); + return _getJobDeserialize(result); +} + +export function _updateJobSend( + context: Client, + jobId: string, + body: BatchJobUpdateOptions, + options: UpdateJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}", jobId) + .patch({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? 
{ "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + priority: body["priority"], + allowTaskPreemption: body["allowTaskPreemption"], + maxParallelTasks: body["maxParallelTasks"], + constraints: !body.constraints + ? undefined + : { + maxWallClockTime: body.constraints?.["maxWallClockTime"], + maxTaskRetryCount: body.constraints?.["maxTaskRetryCount"], + }, + poolInfo: !body.poolInfo + ? undefined + : { + poolId: body.poolInfo?.["poolId"], + autoPoolSpecification: !body.poolInfo?.autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + body.poolInfo?.autoPoolSpecification?.[ + "autoPoolIdPrefix" + ], + poolLifetimeOption: + body.poolInfo?.autoPoolSpecification?.[ + "poolLifetimeOption" + ], + keepAlive: + body.poolInfo?.autoPoolSpecification?.["keepAlive"], + pool: !body.poolInfo?.autoPoolSpecification?.pool + ? undefined + : { + displayName: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "displayName" + ], + vmSize: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "vmSize" + ], + cloudServiceConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.cloudServiceConfiguration + ? undefined + : { + osFamily: + body.poolInfo?.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + body.poolInfo?.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["publisher"], + offer: + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["offer"], + sku: body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.imageReference["sku"], + version: + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["version"], + virtualMachineImageId: + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["virtualMachineImageId"], + }, + nodeAgentSKUId: + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "nodeAgentSKUId" + ], + windowsConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "dataDisks" + ] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "licenseType" + ], + containerConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? 
undefined + : { + type: body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.[ + "resourceId" + ], + }, + })), + }, + diskEncryptionConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.[ + "targets" + ], + }, + nodePlacementConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.[ + "policy" + ], + }, + extensions: ( + body.poolInfo?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "extensions" + ] ?? [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !body.poolInfo?.autoPoolSpecification + ?.pool?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + body.poolInfo + ?.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.osDisk + ?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "taskSlotsPerNode" + ], + taskSchedulingPolicy: !body.poolInfo + ?.autoPoolSpecification?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + body.poolInfo?.autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "resizeTimeout" + ], + targetDedicatedNodes: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "targetDedicatedNodes" + ], + targetLowPriorityNodes: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "enableAutoScale" + ], + autoScaleFormula: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "autoScaleFormula" + ], + autoScaleEvaluationInterval: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool?.networkConfiguration + ? 
undefined + : { + subnetId: + body.poolInfo?.autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + body.poolInfo?.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.networkConfiguration?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + body.poolInfo?.autoPoolSpecification + ?.pool?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: + p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + body.poolInfo?.autoPoolSpecification + ?.pool?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + body.poolInfo?.autoPoolSpecification + ?.pool?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + body.poolInfo?.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !body.poolInfo?.autoPoolSpecification?.pool + ?.startTask + ? undefined + : { + commandLine: + body.poolInfo?.autoPoolSpecification?.pool + ?.startTask?.["commandLine"], + containerSettings: !body.poolInfo + ?.autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.poolInfo?.autoPoolSpecification + ?.pool?.startTask + ?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.poolInfo?.autoPoolSpecification + ?.pool?.startTask + ?.containerSettings?.["imageName"], + registry: !body.poolInfo + ?.autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.poolInfo + ?.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + body.poolInfo + ?.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + body.poolInfo + ?.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !body.poolInfo + ?.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.poolInfo + ?.autoPoolSpecification + ?.pool?.startTask + ?.containerSettings + ?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.poolInfo?.autoPoolSpecification + ?.pool?.startTask + ?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + body.poolInfo?.autoPoolSpecification?.pool + ?.startTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? 
undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + body.poolInfo?.autoPoolSpecification?.pool + ?.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !body.poolInfo + ?.autoPoolSpecification?.pool?.startTask + ?.userIdentity + ? undefined + : { + username: + body.poolInfo?.autoPoolSpecification + ?.pool?.startTask?.userIdentity?.[ + "username" + ], + autoUser: !body.poolInfo + ?.autoPoolSpecification?.pool?.startTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.poolInfo + ?.autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.poolInfo + ?.autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + maxTaskRetryCount: + body.poolInfo?.autoPoolSpecification?.pool + ?.startTask?.["maxTaskRetryCount"], + waitForSuccess: + body.poolInfo?.autoPoolSpecification?.pool + ?.startTask?.["waitForSuccess"], + }, + certificateReferences: ( + body.poolInfo?.autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + body.poolInfo?.autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "applicationLicenses" + ], + userAccounts: ( + body.poolInfo?.autoPoolSpecification?.pool?.[ + "userAccounts" + ] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: + !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + body.poolInfo?.autoPoolSpecification?.pool?.[ + "metadata" + ] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + mountConfiguration: ( + body.poolInfo?.autoPoolSpecification?.pool?.[ + "mountConfiguration" + ] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.[ + "sasKey" + ], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.[ + "resourceId" + ], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? 
undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: + p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: + p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.[ + "accountName" + ], + azureFileUrl: + p.azureFileShareConfiguration?.[ + "azureFileUrl" + ], + accountKey: + p.azureFileShareConfiguration?.[ + "accountKey" + ], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.[ + "mountOptions" + ], + }, + })), + targetNodeCommunicationMode: + body.poolInfo?.autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + onAllTasksComplete: body["onAllTasksComplete"], + metadata: (body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }); +} + +export async function _updateJobDeserialize( + result: UpdateJob200Response | UpdateJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This replaces only the Job properties specified in the request. For example, if + * the Job has constraints, and a request does not specify the constraints + * element, then the Job keeps the existing constraints. + */ +export async function updateJob( + context: Client, + jobId: string, + body: BatchJobUpdateOptions, + options: UpdateJobOptions = { requestOptions: {} } +): Promise { + const result = await _updateJobSend(context, jobId, body, options); + return _updateJobDeserialize(result); +} + +export function _replaceJobSend( + context: Client, + jobId: string, + body: BatchJob, + options: ReplaceJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}", jobId) + .put({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + priority: body["priority"], + allowTaskPreemption: body["allowTaskPreemption"], + maxParallelTasks: body["maxParallelTasks"], + constraints: !body.constraints + ? undefined + : { + maxWallClockTime: body.constraints?.["maxWallClockTime"], + maxTaskRetryCount: body.constraints?.["maxTaskRetryCount"], + }, + poolInfo: { + poolId: body.poolInfo["poolId"], + autoPoolSpecification: !body.poolInfo.autoPoolSpecification + ? 
undefined + : { + autoPoolIdPrefix: + body.poolInfo.autoPoolSpecification?.["autoPoolIdPrefix"], + poolLifetimeOption: + body.poolInfo.autoPoolSpecification?.["poolLifetimeOption"], + keepAlive: body.poolInfo.autoPoolSpecification?.["keepAlive"], + pool: !body.poolInfo.autoPoolSpecification?.pool + ? undefined + : { + displayName: + body.poolInfo.autoPoolSpecification?.pool?.[ + "displayName" + ], + vmSize: + body.poolInfo.autoPoolSpecification?.pool?.["vmSize"], + cloudServiceConfiguration: !body.poolInfo + .autoPoolSpecification?.pool?.cloudServiceConfiguration + ? undefined + : { + osFamily: + body.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + body.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "offer" + ], + sku: body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + }, + nodeAgentSKUId: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "nodeAgentSKUId" + ], + windowsConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["dataDisks"] ?? + [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: body.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + }, + diskEncryptionConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? 
undefined + : { + targets: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.[ + "targets" + ], + }, + nodePlacementConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["extensions"] ?? + [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + body.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.osDisk?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + body.poolInfo.autoPoolSpecification?.pool?.[ + "taskSlotsPerNode" + ], + taskSchedulingPolicy: !body.poolInfo.autoPoolSpecification + ?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + body.poolInfo.autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + body.poolInfo.autoPoolSpecification?.pool?.[ + "resizeTimeout" + ], + targetDedicatedNodes: + body.poolInfo.autoPoolSpecification?.pool?.[ + "targetDedicatedNodes" + ], + targetLowPriorityNodes: + body.poolInfo.autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + body.poolInfo.autoPoolSpecification?.pool?.[ + "enableAutoScale" + ], + autoScaleFormula: + body.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleFormula" + ], + autoScaleEvaluationInterval: + body.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + body.poolInfo.autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !body.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration + ? undefined + : { + subnetId: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !body.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? 
[] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !body.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !body.poolInfo.autoPoolSpecification?.pool + ?.startTask + ? undefined + : { + commandLine: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["commandLine"], + containerSettings: !body.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "imageName" + ], + registry: !body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry + ? undefined + : { + username: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["username"], + password: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !body.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ? undefined + : { + username: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.["username"], + autoUser: !body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity?.autoUser + ? 
undefined + : { + scope: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + maxTaskRetryCount: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["maxTaskRetryCount"], + waitForSuccess: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["waitForSuccess"], + }, + certificateReferences: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + body.poolInfo.autoPoolSpecification?.pool?.[ + "applicationLicenses" + ], + userAccounts: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "userAccounts" + ] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "metadata" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "mountConfiguration" + ] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.[ + "sasKey" + ], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? 
undefined + : { + accountName: + p.azureFileShareConfiguration?.[ + "accountName" + ], + azureFileUrl: + p.azureFileShareConfiguration?.[ + "azureFileUrl" + ], + accountKey: + p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.[ + "mountOptions" + ], + }, + })), + targetNodeCommunicationMode: + body.poolInfo.autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + onAllTasksComplete: body["onAllTasksComplete"], + metadata: (body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }); +} + +export async function _replaceJobDeserialize( + result: ReplaceJob200Response | ReplaceJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This fully replaces all the updatable properties of the Job. For example, if + * the Job has constraints associated with it and if constraints is not specified + * with this request, then the Batch service will remove the existing constraints. + */ +export async function replaceJob( + context: Client, + jobId: string, + body: BatchJob, + options: ReplaceJobOptions = { requestOptions: {} } +): Promise { + const result = await _replaceJobSend(context, jobId, body, options); + return _replaceJobDeserialize(result); +} + +export function _disableJobSend( + context: Client, + jobId: string, + body: BatchJobDisableOptions, + options: DisableJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/disable", jobId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { disableTasks: body["disableTasks"] }, + }); +} + +export async function _disableJobDeserialize( + result: DisableJob202Response | DisableJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * The Batch Service immediately moves the Job to the disabling state. Batch then + * uses the disableTasks parameter to determine what to do with the currently + * running Tasks of the Job. The Job remains in the disabling state until the + * disable operation is completed and all Tasks have been dealt with according to + * the disableTasks option; the Job then moves to the disabled state. No new Tasks + * are started under the Job until it moves back to active state. If you try to + * disable a Job that is in any state other than active, disabling, or disabled, + * the request fails with status code 409. 
+ */ +export async function disableJob( + context: Client, + jobId: string, + body: BatchJobDisableOptions, + options: DisableJobOptions = { requestOptions: {} } +): Promise { + const result = await _disableJobSend(context, jobId, body, options); + return _disableJobDeserialize(result); +} + +export function _enableJobSend( + context: Client, + jobId: string, + options: EnableJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/enable", jobId) + .post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _enableJobDeserialize( + result: EnableJob202Response | EnableJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * When you call this API, the Batch service sets a disabled Job to the enabling + * state. After the this operation is completed, the Job moves to the active + * state, and scheduling of new Tasks under the Job resumes. The Batch service + * does not allow a Task to remain in the active state for more than 180 days. + * Therefore, if you enable a Job containing active Tasks which were added more + * than 180 days ago, those Tasks will not run. + */ +export async function enableJob( + context: Client, + jobId: string, + options: EnableJobOptions = { requestOptions: {} } +): Promise { + const result = await _enableJobSend(context, jobId, options); + return _enableJobDeserialize(result); +} + +export function _terminateJobSend( + context: Client, + jobId: string, + body: BatchJobTerminateOptions, + options: TerminateJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/terminate", jobId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { terminateReason: body["terminateReason"] }, + }); +} + +export async function _terminateJobDeserialize( + result: TerminateJob202Response | TerminateJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * When a Terminate Job request is received, the Batch service sets the Job to the + * terminating state. The Batch service then terminates any running Tasks + * associated with the Job and runs any required Job release Tasks. Then the Job + * moves into the completed state. If there are any Tasks in the Job in the active + * state, they will remain in the active state. 
Once a Job is terminated, new + * Tasks cannot be added and any remaining active Tasks will not be scheduled. + */ +export async function terminateJob( + context: Client, + jobId: string, + body: BatchJobTerminateOptions, + options: TerminateJobOptions = { requestOptions: {} } +): Promise { + const result = await _terminateJobSend(context, jobId, body, options); + return _terminateJobDeserialize(result); +} + +export function _createJobSend( + context: Client, + body: BatchJobCreateOptions, + options: CreateJobOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + id: body["id"], + displayName: body["displayName"], + usesTaskDependencies: body["usesTaskDependencies"], + priority: body["priority"], + allowTaskPreemption: body["allowTaskPreemption"], + maxParallelTasks: body["maxParallelTasks"], + constraints: !body.constraints + ? undefined + : { + maxWallClockTime: body.constraints?.["maxWallClockTime"], + maxTaskRetryCount: body.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !body.jobManagerTask + ? undefined + : { + id: body.jobManagerTask?.["id"], + displayName: body.jobManagerTask?.["displayName"], + commandLine: body.jobManagerTask?.["commandLine"], + containerSettings: !body.jobManagerTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobManagerTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.jobManagerTask?.containerSettings?.["imageName"], + registry: !body.jobManagerTask?.containerSettings?.registry + ? undefined + : { + username: + body.jobManagerTask?.containerSettings?.registry?.[ + "username" + ], + password: + body.jobManagerTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + body.jobManagerTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobManagerTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + body.jobManagerTask?.containerSettings + ?.registry?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.jobManagerTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: (body.jobManagerTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + outputFiles: (body.jobManagerTask?.["outputFiles"] ?? []).map( + (p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: + p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + }) + ), + environmentSettings: ( + body.jobManagerTask?.["environmentSettings"] ?? 
[] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobManagerTask?.constraints + ? undefined + : { + maxWallClockTime: + body.jobManagerTask?.constraints?.["maxWallClockTime"], + retentionTime: + body.jobManagerTask?.constraints?.["retentionTime"], + maxTaskRetryCount: + body.jobManagerTask?.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: body.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: body.jobManagerTask?.["killJobOnCompletion"], + userIdentity: !body.jobManagerTask?.userIdentity + ? undefined + : { + username: body.jobManagerTask?.userIdentity?.["username"], + autoUser: !body.jobManagerTask?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobManagerTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + body.jobManagerTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + runExclusive: body.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + body.jobManagerTask?.["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !body.jobManagerTask + ?.authenticationTokenSettings + ? undefined + : { + access: + body.jobManagerTask?.authenticationTokenSettings?.[ + "access" + ], + }, + allowLowPriorityNode: + body.jobManagerTask?.["allowLowPriorityNode"], + }, + jobPreparationTask: !body.jobPreparationTask + ? undefined + : { + id: body.jobPreparationTask?.["id"], + commandLine: body.jobPreparationTask?.["commandLine"], + containerSettings: !body.jobPreparationTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobPreparationTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.jobPreparationTask?.containerSettings?.["imageName"], + registry: !body.jobPreparationTask?.containerSettings + ?.registry + ? undefined + : { + username: + body.jobPreparationTask?.containerSettings + ?.registry?.["username"], + password: + body.jobPreparationTask?.containerSettings + ?.registry?.["password"], + registryServer: + body.jobPreparationTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !body.jobPreparationTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + body.jobPreparationTask?.containerSettings + ?.registry?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.jobPreparationTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + body.jobPreparationTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + body.jobPreparationTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobPreparationTask?.constraints + ? undefined + : { + maxWallClockTime: + body.jobPreparationTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + body.jobPreparationTask?.constraints?.["retentionTime"], + maxTaskRetryCount: + body.jobPreparationTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + waitForSuccess: body.jobPreparationTask?.["waitForSuccess"], + userIdentity: !body.jobPreparationTask?.userIdentity + ? 
undefined + : { + username: + body.jobPreparationTask?.userIdentity?.["username"], + autoUser: !body.jobPreparationTask?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobPreparationTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + body.jobPreparationTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + rerunOnNodeRebootAfterSuccess: + body.jobPreparationTask?.["rerunOnNodeRebootAfterSuccess"], + }, + jobReleaseTask: !body.jobReleaseTask + ? undefined + : { + id: body.jobReleaseTask?.["id"], + commandLine: body.jobReleaseTask?.["commandLine"], + containerSettings: !body.jobReleaseTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobReleaseTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.jobReleaseTask?.containerSettings?.["imageName"], + registry: !body.jobReleaseTask?.containerSettings?.registry + ? undefined + : { + username: + body.jobReleaseTask?.containerSettings?.registry?.[ + "username" + ], + password: + body.jobReleaseTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + body.jobReleaseTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobReleaseTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + body.jobReleaseTask?.containerSettings + ?.registry?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.jobReleaseTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: (body.jobReleaseTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + body.jobReleaseTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: body.jobReleaseTask?.["maxWallClockTime"], + retentionTime: body.jobReleaseTask?.["retentionTime"], + userIdentity: !body.jobReleaseTask?.userIdentity + ? undefined + : { + username: body.jobReleaseTask?.userIdentity?.["username"], + autoUser: !body.jobReleaseTask?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobReleaseTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + body.jobReleaseTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + }, + commonEnvironmentSettings: ( + body["commonEnvironmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + poolInfo: { + poolId: body.poolInfo["poolId"], + autoPoolSpecification: !body.poolInfo.autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + body.poolInfo.autoPoolSpecification?.["autoPoolIdPrefix"], + poolLifetimeOption: + body.poolInfo.autoPoolSpecification?.["poolLifetimeOption"], + keepAlive: body.poolInfo.autoPoolSpecification?.["keepAlive"], + pool: !body.poolInfo.autoPoolSpecification?.pool + ? undefined + : { + displayName: + body.poolInfo.autoPoolSpecification?.pool?.[ + "displayName" + ], + vmSize: + body.poolInfo.autoPoolSpecification?.pool?.["vmSize"], + cloudServiceConfiguration: !body.poolInfo + .autoPoolSpecification?.pool?.cloudServiceConfiguration + ? 
undefined + : { + osFamily: + body.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + body.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "offer" + ], + sku: body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + }, + nodeAgentSKUId: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "nodeAgentSKUId" + ], + windowsConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["dataDisks"] ?? + [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: body.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + }, + diskEncryptionConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.[ + "targets" + ], + }, + nodePlacementConfiguration: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["extensions"] ?? 
+ [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !body.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !body.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + body.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.osDisk?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + body.poolInfo.autoPoolSpecification?.pool?.[ + "taskSlotsPerNode" + ], + taskSchedulingPolicy: !body.poolInfo.autoPoolSpecification + ?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + body.poolInfo.autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + body.poolInfo.autoPoolSpecification?.pool?.[ + "resizeTimeout" + ], + targetDedicatedNodes: + body.poolInfo.autoPoolSpecification?.pool?.[ + "targetDedicatedNodes" + ], + targetLowPriorityNodes: + body.poolInfo.autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + body.poolInfo.autoPoolSpecification?.pool?.[ + "enableAutoScale" + ], + autoScaleFormula: + body.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleFormula" + ], + autoScaleEvaluationInterval: + body.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + body.poolInfo.autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !body.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration + ? undefined + : { + subnetId: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !body.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !body.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? 
undefined + : { + provision: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + body.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !body.poolInfo.autoPoolSpecification?.pool + ?.startTask + ? undefined + : { + commandLine: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["commandLine"], + containerSettings: !body.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "imageName" + ], + registry: !body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry + ? undefined + : { + username: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["username"], + password: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !body.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ? undefined + : { + username: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.["username"], + autoUser: !body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + maxTaskRetryCount: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["maxTaskRetryCount"], + waitForSuccess: + body.poolInfo.autoPoolSpecification?.pool + ?.startTask?.["waitForSuccess"], + }, + certificateReferences: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? 
[] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + body.poolInfo.autoPoolSpecification?.pool?.[ + "applicationLicenses" + ], + userAccounts: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "userAccounts" + ] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "metadata" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + body.poolInfo.autoPoolSpecification?.pool?.[ + "mountConfiguration" + ] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.[ + "sasKey" + ], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.[ + "accountName" + ], + azureFileUrl: + p.azureFileShareConfiguration?.[ + "azureFileUrl" + ], + accountKey: + p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.[ + "mountOptions" + ], + }, + })), + targetNodeCommunicationMode: + body.poolInfo.autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + onAllTasksComplete: body["onAllTasksComplete"], + onTaskFailure: body["onTaskFailure"], + networkConfiguration: !body.networkConfiguration + ? 
undefined + : { subnetId: body.networkConfiguration?.["subnetId"] }, + metadata: (body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }); +} + +export async function _createJobDeserialize( + result: CreateJob201Response | CreateJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * The Batch service supports two ways to control the work done as part of a Job. + * In the first approach, the user specifies a Job Manager Task. The Batch service + * launches this Task when it is ready to start the Job. The Job Manager Task + * controls all other Tasks that run under this Job, by using the Task APIs. In + * the second approach, the user directly controls the execution of Tasks under an + * active Job, by using the Task APIs. Also note: when naming Jobs, avoid + * including sensitive information such as user names or secret project names. + * This information may appear in telemetry logs accessible to Microsoft Support + * engineers. + */ +export async function createJob( + context: Client, + body: BatchJobCreateOptions, + options: CreateJobOptions = { requestOptions: {} } +): Promise { + const result = await _createJobSend(context, body, options); + return _createJobDeserialize(result); +} + +export function _listJobsSend( + context: Client, + options: ListJobsOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _listJobsDeserialize( + result: ListJobs200Response | ListJobsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + displayName: p["displayName"], + usesTaskDependencies: p["usesTaskDependencies"], + url: p["url"], + eTag: p["eTag"], + lastModified: + p["lastModified"] !== undefined + ? new Date(p["lastModified"]) + : undefined, + creationTime: + p["creationTime"] !== undefined + ? new Date(p["creationTime"]) + : undefined, + state: p["state"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + previousState: p["previousState"], + previousStateTransitionTime: + p["previousStateTransitionTime"] !== undefined + ? new Date(p["previousStateTransitionTime"]) + : undefined, + priority: p["priority"], + allowTaskPreemption: p["allowTaskPreemption"], + maxParallelTasks: p["maxParallelTasks"], + constraints: !p.constraints + ? undefined + : { + maxWallClockTime: p.constraints?.["maxWallClockTime"], + maxTaskRetryCount: p.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !p.jobManagerTask + ? undefined + : { + id: p.jobManagerTask?.["id"], + displayName: p.jobManagerTask?.["displayName"], + commandLine: p.jobManagerTask?.["commandLine"], + containerSettings: !p.jobManagerTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobManagerTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: p.jobManagerTask?.containerSettings?.["imageName"], + registry: !p.jobManagerTask?.containerSettings?.registry + ? 
undefined + : { + username: + p.jobManagerTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.jobManagerTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.jobManagerTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.jobManagerTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobManagerTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobManagerTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p.jobManagerTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + outputFiles: (p.jobManagerTask?.["outputFiles"] ?? []).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: ( + p.jobManagerTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !p.jobManagerTask?.constraints + ? undefined + : { + maxWallClockTime: + p.jobManagerTask?.constraints?.["maxWallClockTime"], + retentionTime: + p.jobManagerTask?.constraints?.["retentionTime"], + maxTaskRetryCount: + p.jobManagerTask?.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: p.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: p.jobManagerTask?.["killJobOnCompletion"], + userIdentity: !p.jobManagerTask?.userIdentity + ? undefined + : { + username: p.jobManagerTask?.userIdentity?.["username"], + autoUser: !p.jobManagerTask?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobManagerTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + p.jobManagerTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + runExclusive: p.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + p.jobManagerTask?.["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !p.jobManagerTask + ?.authenticationTokenSettings + ? undefined + : { + access: + p.jobManagerTask?.authenticationTokenSettings?.["access"], + }, + allowLowPriorityNode: p.jobManagerTask?.["allowLowPriorityNode"], + }, + jobPreparationTask: !p.jobPreparationTask + ? undefined + : { + id: p.jobPreparationTask?.["id"], + commandLine: p.jobPreparationTask?.["commandLine"], + containerSettings: !p.jobPreparationTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobPreparationTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + p.jobPreparationTask?.containerSettings?.["imageName"], + registry: !p.jobPreparationTask?.containerSettings?.registry + ? 
undefined + : { + username: + p.jobPreparationTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.jobPreparationTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.jobPreparationTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.jobPreparationTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobPreparationTask?.containerSettings + ?.registry?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobPreparationTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: (p.jobPreparationTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + p.jobPreparationTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !p.jobPreparationTask?.constraints + ? undefined + : { + maxWallClockTime: + p.jobPreparationTask?.constraints?.["maxWallClockTime"], + retentionTime: + p.jobPreparationTask?.constraints?.["retentionTime"], + maxTaskRetryCount: + p.jobPreparationTask?.constraints?.["maxTaskRetryCount"], + }, + waitForSuccess: p.jobPreparationTask?.["waitForSuccess"], + userIdentity: !p.jobPreparationTask?.userIdentity + ? undefined + : { + username: p.jobPreparationTask?.userIdentity?.["username"], + autoUser: !p.jobPreparationTask?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobPreparationTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + p.jobPreparationTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + rerunOnNodeRebootAfterSuccess: + p.jobPreparationTask?.["rerunOnNodeRebootAfterSuccess"], + }, + jobReleaseTask: !p.jobReleaseTask + ? undefined + : { + id: p.jobReleaseTask?.["id"], + commandLine: p.jobReleaseTask?.["commandLine"], + containerSettings: !p.jobReleaseTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobReleaseTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: p.jobReleaseTask?.containerSettings?.["imageName"], + registry: !p.jobReleaseTask?.containerSettings?.registry + ? undefined + : { + username: + p.jobReleaseTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.jobReleaseTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.jobReleaseTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.jobReleaseTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobReleaseTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobReleaseTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p.jobReleaseTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + p.jobReleaseTask?.["environmentSettings"] ?? 
[] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: p.jobReleaseTask?.["maxWallClockTime"], + retentionTime: p.jobReleaseTask?.["retentionTime"], + userIdentity: !p.jobReleaseTask?.userIdentity + ? undefined + : { + username: p.jobReleaseTask?.userIdentity?.["username"], + autoUser: !p.jobReleaseTask?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobReleaseTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + p.jobReleaseTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + }, + commonEnvironmentSettings: (p["commonEnvironmentSettings"] ?? []).map( + (p) => ({ name: p["name"], value: p["value"] }) + ), + poolInfo: { + poolId: p.poolInfo["poolId"], + autoPoolSpecification: !p.poolInfo.autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + p.poolInfo.autoPoolSpecification?.["autoPoolIdPrefix"], + poolLifetimeOption: + p.poolInfo.autoPoolSpecification?.["poolLifetimeOption"], + keepAlive: p.poolInfo.autoPoolSpecification?.["keepAlive"], + pool: !p.poolInfo.autoPoolSpecification?.pool + ? undefined + : { + displayName: + p.poolInfo.autoPoolSpecification?.pool?.["displayName"], + vmSize: p.poolInfo.autoPoolSpecification?.pool?.["vmSize"], + cloudServiceConfiguration: !p.poolInfo.autoPoolSpecification + ?.pool?.cloudServiceConfiguration + ? undefined + : { + osFamily: + p.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + p.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !p.poolInfo + .autoPoolSpecification?.pool?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "offer" + ], + sku: p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + exactVersion: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "exactVersion" + ], + }, + nodeAgentSKUId: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["nodeAgentSKUId"], + windowsConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["dataDisks"] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? 
undefined + : { + type: p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + }, + diskEncryptionConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.["targets"], + }, + nodePlacementConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["extensions"] ?? + [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + p.poolInfo.autoPoolSpecification?.pool?.[ + "taskSlotsPerNode" + ], + taskSchedulingPolicy: !p.poolInfo.autoPoolSpecification + ?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + p.poolInfo.autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + p.poolInfo.autoPoolSpecification?.pool?.["resizeTimeout"], + targetDedicatedNodes: + p.poolInfo.autoPoolSpecification?.pool?.[ + "targetDedicatedNodes" + ], + targetLowPriorityNodes: + p.poolInfo.autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + p.poolInfo.autoPoolSpecification?.pool?.[ + "enableAutoScale" + ], + autoScaleFormula: + p.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleFormula" + ], + autoScaleEvaluationInterval: + p.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + p.poolInfo.autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !p.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration + ? 
undefined + : { + subnetId: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !p.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !p.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !p.poolInfo.autoPoolSpecification?.pool + ?.startTask + ? undefined + : { + commandLine: + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "commandLine" + ], + containerSettings: !p.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "imageName" + ], + registry: !p.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings?.registry + ? undefined + : { + username: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !p.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + p.poolInfo.autoPoolSpecification + ?.pool?.startTask + ?.containerSettings?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? 
undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity + ? undefined + : { + username: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.["username"], + autoUser: !p.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "maxTaskRetryCount" + ], + waitForSuccess: + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "waitForSuccess" + ], + }, + certificateReferences: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + p.poolInfo.autoPoolSpecification?.pool?.[ + "applicationLicenses" + ], + userAccounts: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "userAccounts" + ] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + p.poolInfo.autoPoolSpecification?.pool?.["metadata"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "mountConfiguration" + ] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.["sasKey"], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? 
undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.["accountName"], + azureFileUrl: + p.azureFileShareConfiguration?.["azureFileUrl"], + accountKey: + p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.["mountOptions"], + }, + })), + targetNodeCommunicationMode: + p.poolInfo.autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + onAllTasksComplete: p["onAllTasksComplete"], + onTaskFailure: p["onTaskFailure"], + networkConfiguration: !p.networkConfiguration + ? undefined + : { subnetId: p.networkConfiguration?.["subnetId"] }, + metadata: (p["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + executionInfo: !p.executionInfo + ? undefined + : { + startTime: new Date(p.executionInfo?.["startTime"]), + endTime: + p.executionInfo?.["endTime"] !== undefined + ? new Date(p.executionInfo?.["endTime"]) + : undefined, + poolId: p.executionInfo?.["poolId"], + schedulingError: !p.executionInfo?.schedulingError + ? undefined + : { + category: p.executionInfo?.schedulingError?.["category"], + code: p.executionInfo?.schedulingError?.["code"], + message: p.executionInfo?.schedulingError?.["message"], + details: ( + p.executionInfo?.schedulingError?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + terminateReason: p.executionInfo?.["terminateReason"], + }, + stats: !p.stats + ? undefined + : { + url: p.stats?.["url"], + startTime: new Date(p.stats?.["startTime"]), + lastUpdateTime: new Date(p.stats?.["lastUpdateTime"]), + userCPUTime: p.stats?.["userCPUTime"], + kernelCPUTime: p.stats?.["kernelCPUTime"], + wallClockTime: p.stats?.["wallClockTime"], + readIOps: p.stats?.["readIOps"], + writeIOps: p.stats?.["writeIOps"], + readIOGiB: p.stats?.["readIOGiB"], + writeIOGiB: p.stats?.["writeIOGiB"], + numSucceededTasks: p.stats?.["numSucceededTasks"], + numFailedTasks: p.stats?.["numFailedTasks"], + numTaskRetries: p.stats?.["numTaskRetries"], + waitTime: p.stats?.["waitTime"], + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists all of the Jobs in the specified Account. 
*/ +export async function listJobs( + context: Client, + options: ListJobsOptions = { requestOptions: {} } +): Promise { + const result = await _listJobsSend(context, options); + return _listJobsDeserialize(result); +} + +export function _listJobsFromScheduleSend( + context: Client, + jobScheduleId: string, + options: ListJobsFromScheduleOptions = { requestOptions: {} } +): StreamableMethod< + ListJobsFromSchedule200Response | ListJobsFromScheduleDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}/jobs", jobScheduleId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _listJobsFromScheduleDeserialize( + result: ListJobsFromSchedule200Response | ListJobsFromScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + displayName: p["displayName"], + usesTaskDependencies: p["usesTaskDependencies"], + url: p["url"], + eTag: p["eTag"], + lastModified: + p["lastModified"] !== undefined + ? new Date(p["lastModified"]) + : undefined, + creationTime: + p["creationTime"] !== undefined + ? new Date(p["creationTime"]) + : undefined, + state: p["state"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + previousState: p["previousState"], + previousStateTransitionTime: + p["previousStateTransitionTime"] !== undefined + ? new Date(p["previousStateTransitionTime"]) + : undefined, + priority: p["priority"], + allowTaskPreemption: p["allowTaskPreemption"], + maxParallelTasks: p["maxParallelTasks"], + constraints: !p.constraints + ? undefined + : { + maxWallClockTime: p.constraints?.["maxWallClockTime"], + maxTaskRetryCount: p.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !p.jobManagerTask + ? undefined + : { + id: p.jobManagerTask?.["id"], + displayName: p.jobManagerTask?.["displayName"], + commandLine: p.jobManagerTask?.["commandLine"], + containerSettings: !p.jobManagerTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobManagerTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: p.jobManagerTask?.containerSettings?.["imageName"], + registry: !p.jobManagerTask?.containerSettings?.registry + ? undefined + : { + username: + p.jobManagerTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.jobManagerTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.jobManagerTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.jobManagerTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobManagerTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobManagerTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p.jobManagerTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + outputFiles: (p.jobManagerTask?.["outputFiles"] ?? 
[]).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: ( + p.jobManagerTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !p.jobManagerTask?.constraints + ? undefined + : { + maxWallClockTime: + p.jobManagerTask?.constraints?.["maxWallClockTime"], + retentionTime: + p.jobManagerTask?.constraints?.["retentionTime"], + maxTaskRetryCount: + p.jobManagerTask?.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: p.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: p.jobManagerTask?.["killJobOnCompletion"], + userIdentity: !p.jobManagerTask?.userIdentity + ? undefined + : { + username: p.jobManagerTask?.userIdentity?.["username"], + autoUser: !p.jobManagerTask?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobManagerTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + p.jobManagerTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + runExclusive: p.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + p.jobManagerTask?.["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !p.jobManagerTask + ?.authenticationTokenSettings + ? undefined + : { + access: + p.jobManagerTask?.authenticationTokenSettings?.["access"], + }, + allowLowPriorityNode: p.jobManagerTask?.["allowLowPriorityNode"], + }, + jobPreparationTask: !p.jobPreparationTask + ? undefined + : { + id: p.jobPreparationTask?.["id"], + commandLine: p.jobPreparationTask?.["commandLine"], + containerSettings: !p.jobPreparationTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobPreparationTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + p.jobPreparationTask?.containerSettings?.["imageName"], + registry: !p.jobPreparationTask?.containerSettings?.registry + ? undefined + : { + username: + p.jobPreparationTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.jobPreparationTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.jobPreparationTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.jobPreparationTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobPreparationTask?.containerSettings + ?.registry?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobPreparationTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: (p.jobPreparationTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? 
undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + p.jobPreparationTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !p.jobPreparationTask?.constraints + ? undefined + : { + maxWallClockTime: + p.jobPreparationTask?.constraints?.["maxWallClockTime"], + retentionTime: + p.jobPreparationTask?.constraints?.["retentionTime"], + maxTaskRetryCount: + p.jobPreparationTask?.constraints?.["maxTaskRetryCount"], + }, + waitForSuccess: p.jobPreparationTask?.["waitForSuccess"], + userIdentity: !p.jobPreparationTask?.userIdentity + ? undefined + : { + username: p.jobPreparationTask?.userIdentity?.["username"], + autoUser: !p.jobPreparationTask?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobPreparationTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + p.jobPreparationTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + rerunOnNodeRebootAfterSuccess: + p.jobPreparationTask?.["rerunOnNodeRebootAfterSuccess"], + }, + jobReleaseTask: !p.jobReleaseTask + ? undefined + : { + id: p.jobReleaseTask?.["id"], + commandLine: p.jobReleaseTask?.["commandLine"], + containerSettings: !p.jobReleaseTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobReleaseTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: p.jobReleaseTask?.containerSettings?.["imageName"], + registry: !p.jobReleaseTask?.containerSettings?.registry + ? undefined + : { + username: + p.jobReleaseTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.jobReleaseTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.jobReleaseTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.jobReleaseTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobReleaseTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobReleaseTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p.jobReleaseTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + p.jobReleaseTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: p.jobReleaseTask?.["maxWallClockTime"], + retentionTime: p.jobReleaseTask?.["retentionTime"], + userIdentity: !p.jobReleaseTask?.userIdentity + ? undefined + : { + username: p.jobReleaseTask?.userIdentity?.["username"], + autoUser: !p.jobReleaseTask?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobReleaseTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + p.jobReleaseTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + }, + commonEnvironmentSettings: (p["commonEnvironmentSettings"] ?? []).map( + (p) => ({ name: p["name"], value: p["value"] }) + ), + poolInfo: { + poolId: p.poolInfo["poolId"], + autoPoolSpecification: !p.poolInfo.autoPoolSpecification + ? 
undefined + : { + autoPoolIdPrefix: + p.poolInfo.autoPoolSpecification?.["autoPoolIdPrefix"], + poolLifetimeOption: + p.poolInfo.autoPoolSpecification?.["poolLifetimeOption"], + keepAlive: p.poolInfo.autoPoolSpecification?.["keepAlive"], + pool: !p.poolInfo.autoPoolSpecification?.pool + ? undefined + : { + displayName: + p.poolInfo.autoPoolSpecification?.pool?.["displayName"], + vmSize: p.poolInfo.autoPoolSpecification?.pool?.["vmSize"], + cloudServiceConfiguration: !p.poolInfo.autoPoolSpecification + ?.pool?.cloudServiceConfiguration + ? undefined + : { + osFamily: + p.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + p.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !p.poolInfo + .autoPoolSpecification?.pool?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "offer" + ], + sku: p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + exactVersion: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "exactVersion" + ], + }, + nodeAgentSKUId: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["nodeAgentSKUId"], + windowsConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["dataDisks"] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + }, + diskEncryptionConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? 
undefined + : { + targets: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.["targets"], + }, + nodePlacementConfiguration: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["extensions"] ?? + [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !p.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + p.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + p.poolInfo.autoPoolSpecification?.pool?.[ + "taskSlotsPerNode" + ], + taskSchedulingPolicy: !p.poolInfo.autoPoolSpecification + ?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + p.poolInfo.autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + p.poolInfo.autoPoolSpecification?.pool?.["resizeTimeout"], + targetDedicatedNodes: + p.poolInfo.autoPoolSpecification?.pool?.[ + "targetDedicatedNodes" + ], + targetLowPriorityNodes: + p.poolInfo.autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + p.poolInfo.autoPoolSpecification?.pool?.[ + "enableAutoScale" + ], + autoScaleFormula: + p.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleFormula" + ], + autoScaleEvaluationInterval: + p.poolInfo.autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + p.poolInfo.autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !p.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration + ? undefined + : { + subnetId: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !p.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? 
[] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !p.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + p.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !p.poolInfo.autoPoolSpecification?.pool + ?.startTask + ? undefined + : { + commandLine: + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "commandLine" + ], + containerSettings: !p.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "imageName" + ], + registry: !p.poolInfo.autoPoolSpecification + ?.pool?.startTask?.containerSettings?.registry + ? undefined + : { + username: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !p.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + p.poolInfo.autoPoolSpecification + ?.pool?.startTask + ?.containerSettings?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity + ? undefined + : { + username: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.["username"], + autoUser: !p.poolInfo.autoPoolSpecification + ?.pool?.startTask?.userIdentity?.autoUser + ? 
undefined + : { + scope: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + p.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "maxTaskRetryCount" + ], + waitForSuccess: + p.poolInfo.autoPoolSpecification?.pool?.startTask?.[ + "waitForSuccess" + ], + }, + certificateReferences: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + p.poolInfo.autoPoolSpecification?.pool?.[ + "applicationLicenses" + ], + userAccounts: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "userAccounts" + ] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + p.poolInfo.autoPoolSpecification?.pool?.["metadata"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + p.poolInfo.autoPoolSpecification?.pool?.[ + "mountConfiguration" + ] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.["sasKey"], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? 
undefined + : { + accountName: + p.azureFileShareConfiguration?.["accountName"], + azureFileUrl: + p.azureFileShareConfiguration?.["azureFileUrl"], + accountKey: + p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.["mountOptions"], + }, + })), + targetNodeCommunicationMode: + p.poolInfo.autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + onAllTasksComplete: p["onAllTasksComplete"], + onTaskFailure: p["onTaskFailure"], + networkConfiguration: !p.networkConfiguration + ? undefined + : { subnetId: p.networkConfiguration?.["subnetId"] }, + metadata: (p["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + executionInfo: !p.executionInfo + ? undefined + : { + startTime: new Date(p.executionInfo?.["startTime"]), + endTime: + p.executionInfo?.["endTime"] !== undefined + ? new Date(p.executionInfo?.["endTime"]) + : undefined, + poolId: p.executionInfo?.["poolId"], + schedulingError: !p.executionInfo?.schedulingError + ? undefined + : { + category: p.executionInfo?.schedulingError?.["category"], + code: p.executionInfo?.schedulingError?.["code"], + message: p.executionInfo?.schedulingError?.["message"], + details: ( + p.executionInfo?.schedulingError?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + terminateReason: p.executionInfo?.["terminateReason"], + }, + stats: !p.stats + ? undefined + : { + url: p.stats?.["url"], + startTime: new Date(p.stats?.["startTime"]), + lastUpdateTime: new Date(p.stats?.["lastUpdateTime"]), + userCPUTime: p.stats?.["userCPUTime"], + kernelCPUTime: p.stats?.["kernelCPUTime"], + wallClockTime: p.stats?.["wallClockTime"], + readIOps: p.stats?.["readIOps"], + writeIOps: p.stats?.["writeIOps"], + readIOGiB: p.stats?.["readIOGiB"], + writeIOGiB: p.stats?.["writeIOGiB"], + numSucceededTasks: p.stats?.["numSucceededTasks"], + numFailedTasks: p.stats?.["numFailedTasks"], + numTaskRetries: p.stats?.["numTaskRetries"], + waitTime: p.stats?.["waitTime"], + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists the Jobs that have been created under the specified Job Schedule. */ +export async function listJobsFromSchedule( + context: Client, + jobScheduleId: string, + options: ListJobsFromScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _listJobsFromScheduleSend( + context, + jobScheduleId, + options + ); + return _listJobsFromScheduleDeserialize(result); +} + +export function _listJobPreparationAndReleaseTaskStatusSend( + context: Client, + jobId: string, + options: ListJobPreparationAndReleaseTaskStatusOptions = { + requestOptions: {}, + } +): StreamableMethod< + | ListJobPreparationAndReleaseTaskStatus200Response + | ListJobPreparationAndReleaseTaskStatusDefaultResponse +> { + return context + .path("/jobs/{jobId}/jobpreparationandreleasetaskstatus", jobId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + }, + }); +} + +export async function _listJobPreparationAndReleaseTaskStatusDeserialize( + result: + | ListJobPreparationAndReleaseTaskStatus200Response + | ListJobPreparationAndReleaseTaskStatusDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? 
[]).map((p) => ({ + poolId: p["poolId"], + nodeId: p["nodeId"], + nodeUrl: p["nodeUrl"], + jobPreparationTaskExecutionInfo: !p.jobPreparationTaskExecutionInfo + ? undefined + : { + startTime: new Date( + p.jobPreparationTaskExecutionInfo?.["startTime"] + ), + endTime: + p.jobPreparationTaskExecutionInfo?.["endTime"] !== undefined + ? new Date(p.jobPreparationTaskExecutionInfo?.["endTime"]) + : undefined, + state: p.jobPreparationTaskExecutionInfo?.["state"], + taskRootDirectory: + p.jobPreparationTaskExecutionInfo?.["taskRootDirectory"], + taskRootDirectoryUrl: + p.jobPreparationTaskExecutionInfo?.["taskRootDirectoryUrl"], + exitCode: p.jobPreparationTaskExecutionInfo?.["exitCode"], + containerInfo: !p.jobPreparationTaskExecutionInfo?.containerInfo + ? undefined + : { + containerId: + p.jobPreparationTaskExecutionInfo?.containerInfo?.[ + "containerId" + ], + state: + p.jobPreparationTaskExecutionInfo?.containerInfo?.["state"], + error: + p.jobPreparationTaskExecutionInfo?.containerInfo?.["error"], + }, + failureInfo: !p.jobPreparationTaskExecutionInfo?.failureInfo + ? undefined + : { + category: + p.jobPreparationTaskExecutionInfo?.failureInfo?.[ + "category" + ], + code: p.jobPreparationTaskExecutionInfo?.failureInfo?.[ + "code" + ], + message: + p.jobPreparationTaskExecutionInfo?.failureInfo?.["message"], + details: ( + p.jobPreparationTaskExecutionInfo?.failureInfo?.[ + "details" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + retryCount: p.jobPreparationTaskExecutionInfo?.["retryCount"], + lastRetryTime: + p.jobPreparationTaskExecutionInfo?.["lastRetryTime"] !== undefined + ? new Date(p.jobPreparationTaskExecutionInfo?.["lastRetryTime"]) + : undefined, + result: p.jobPreparationTaskExecutionInfo?.["result"], + }, + jobReleaseTaskExecutionInfo: !p.jobReleaseTaskExecutionInfo + ? undefined + : { + startTime: new Date(p.jobReleaseTaskExecutionInfo?.["startTime"]), + endTime: + p.jobReleaseTaskExecutionInfo?.["endTime"] !== undefined + ? new Date(p.jobReleaseTaskExecutionInfo?.["endTime"]) + : undefined, + state: p.jobReleaseTaskExecutionInfo?.["state"], + taskRootDirectory: + p.jobReleaseTaskExecutionInfo?.["taskRootDirectory"], + taskRootDirectoryUrl: + p.jobReleaseTaskExecutionInfo?.["taskRootDirectoryUrl"], + exitCode: p.jobReleaseTaskExecutionInfo?.["exitCode"], + containerInfo: !p.jobReleaseTaskExecutionInfo?.containerInfo + ? undefined + : { + containerId: + p.jobReleaseTaskExecutionInfo?.containerInfo?.[ + "containerId" + ], + state: + p.jobReleaseTaskExecutionInfo?.containerInfo?.["state"], + error: + p.jobReleaseTaskExecutionInfo?.containerInfo?.["error"], + }, + failureInfo: !p.jobReleaseTaskExecutionInfo?.failureInfo + ? undefined + : { + category: + p.jobReleaseTaskExecutionInfo?.failureInfo?.["category"], + code: p.jobReleaseTaskExecutionInfo?.failureInfo?.["code"], + message: + p.jobReleaseTaskExecutionInfo?.failureInfo?.["message"], + details: ( + p.jobReleaseTaskExecutionInfo?.failureInfo?.["details"] ?? + [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + result: p.jobReleaseTaskExecutionInfo?.["result"], + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** + * This API returns the Job Preparation and Job Release Task status on all Compute + * Nodes that have run the Job Preparation or Job Release Task. This includes + * Compute Nodes which have since been removed from the Pool. 
If this API is + * invoked on a Job which has no Job Preparation or Job Release Task, the Batch + * service returns HTTP status code 409 (Conflict) with an error code of + * JobPreparationTaskNotSpecified. + */ +export async function listJobPreparationAndReleaseTaskStatus( + context: Client, + jobId: string, + options: ListJobPreparationAndReleaseTaskStatusOptions = { + requestOptions: {}, + } +): Promise { + const result = await _listJobPreparationAndReleaseTaskStatusSend( + context, + jobId, + options + ); + return _listJobPreparationAndReleaseTaskStatusDeserialize(result); +} + +export function _getJobTaskCountsSend( + context: Client, + jobId: string, + options: GetJobTaskCountsOptions = { requestOptions: {} } +): StreamableMethod< + GetJobTaskCounts200Response | GetJobTaskCountsDefaultResponse +> { + return context + .path("/jobs/{jobId}/taskcounts", jobId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getJobTaskCountsDeserialize( + result: GetJobTaskCounts200Response | GetJobTaskCountsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + taskCounts: { + active: result.body.taskCounts["active"], + running: result.body.taskCounts["running"], + completed: result.body.taskCounts["completed"], + succeeded: result.body.taskCounts["succeeded"], + failed: result.body.taskCounts["failed"], + }, + taskSlotCounts: { + active: result.body.taskSlotCounts["active"], + running: result.body.taskSlotCounts["running"], + completed: result.body.taskSlotCounts["completed"], + succeeded: result.body.taskSlotCounts["succeeded"], + failed: result.body.taskSlotCounts["failed"], + }, + }; +} + +/** + * Task counts provide a count of the Tasks by active, running or completed Task + * state, and a count of Tasks which succeeded or failed. Tasks in the preparing + * state are counted as running. Note that the numbers returned may not always be + * up to date. If you need exact task counts, use a list query. + */ +export async function getJobTaskCounts( + context: Client, + jobId: string, + options: GetJobTaskCountsOptions = { requestOptions: {} } +): Promise { + const result = await _getJobTaskCountsSend(context, jobId, options); + return _getJobTaskCountsDeserialize(result); +} + +export function _createCertificateSend( + context: Client, + body: BatchCertificate, + options: CreateCertificateOptions = { requestOptions: {} } +): StreamableMethod< + CreateCertificate201Response | CreateCertificateDefaultResponse +> { + return context + .path("/certificates") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + thumbprint: body["thumbprint"], + thumbprintAlgorithm: body["thumbprintAlgorithm"], + data: uint8ArrayToString(body["data"], "base64"), + certificateFormat: body["certificateFormat"], + password: body["password"], + }, + }); +} + +export async function _createCertificateDeserialize( + result: CreateCertificate201Response | CreateCertificateDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Creates a Certificate to the specified Account. 
*/ +export async function createCertificate( + context: Client, + body: BatchCertificate, + options: CreateCertificateOptions = { requestOptions: {} } +): Promise { + const result = await _createCertificateSend(context, body, options); + return _createCertificateDeserialize(result); +} + +export function _listCertificatesSend( + context: Client, + options: ListCertificatesOptions = { requestOptions: {} } +): StreamableMethod< + ListCertificates200Response | ListCertificatesDefaultResponse +> { + return context + .path("/certificates") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + }, + }); +} + +export async function _listCertificatesDeserialize( + result: ListCertificates200Response | ListCertificatesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + url: p["url"], + state: p["state"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + previousState: p["previousState"], + previousStateTransitionTime: + p["previousStateTransitionTime"] !== undefined + ? new Date(p["previousStateTransitionTime"]) + : undefined, + publicData: + typeof p["publicData"] === "string" + ? stringToUint8Array(p["publicData"], "base64") + : p["publicData"], + deleteCertificateError: !p.deleteCertificateError + ? undefined + : { + code: p.deleteCertificateError?.["code"], + message: p.deleteCertificateError?.["message"], + values: (p.deleteCertificateError?.["values"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + data: + typeof p["data"] === "string" + ? stringToUint8Array(p["data"], "base64") + : p["data"], + certificateFormat: p["certificateFormat"], + password: p["password"], + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists all of the Certificates that have been added to the specified Account. */ +export async function listCertificates( + context: Client, + options: ListCertificatesOptions = { requestOptions: {} } +): Promise { + const result = await _listCertificatesSend(context, options); + return _listCertificatesDeserialize(result); +} + +export function _cancelCertificateDeletionSend( + context: Client, + thumbprintAlgorithm: string, + thumbprint: string, + options: CancelCertificateDeletionOptions = { requestOptions: {} } +): StreamableMethod< + | CancelCertificateDeletion204Response + | CancelCertificateDeletionDefaultResponse +> { + return context + .path( + "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete", + thumbprintAlgorithm, + thumbprint + ) + .post({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _cancelCertificateDeletionDeserialize( + result: + | CancelCertificateDeletion204Response + | CancelCertificateDeletionDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * If you try to delete a Certificate that is being used by a Pool or Compute + * Node, the status of the Certificate changes to deleteFailed. If you decide that + * you want to continue using the Certificate, you can use this operation to set + * the status of the Certificate back to active. 
If you intend to delete the + * Certificate, you do not need to run this operation after the deletion failed. + * You must make sure that the Certificate is not being used by any resources, and + * then you can try again to delete the Certificate. + */ +export async function cancelCertificateDeletion( + context: Client, + thumbprintAlgorithm: string, + thumbprint: string, + options: CancelCertificateDeletionOptions = { requestOptions: {} } +): Promise { + const result = await _cancelCertificateDeletionSend( + context, + thumbprintAlgorithm, + thumbprint, + options + ); + return _cancelCertificateDeletionDeserialize(result); +} + +export function _deleteCertificateSend( + context: Client, + thumbprintAlgorithm: string, + thumbprint: string, + options: DeleteCertificateOptions = { requestOptions: {} } +): StreamableMethod< + DeleteCertificate202Response | DeleteCertificateDefaultResponse +> { + return context + .path( + "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", + thumbprintAlgorithm, + thumbprint + ) + .delete({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _deleteCertificateDeserialize( + result: DeleteCertificate202Response | DeleteCertificateDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You cannot delete a Certificate if a resource (Pool or Compute Node) is using + * it. Before you can delete a Certificate, you must therefore make sure that the + * Certificate is not associated with any existing Pools, the Certificate is not + * installed on any Nodes (even if you remove a Certificate from a Pool, it is not + * removed from existing Compute Nodes in that Pool until they restart), and no + * running Tasks depend on the Certificate. If you try to delete a Certificate + * that is in use, the deletion fails. The Certificate status changes to + * deleteFailed. You can use Cancel Delete Certificate to set the status back to + * active if you decide that you want to continue using the Certificate. + */ +export async function deleteCertificate( + context: Client, + thumbprintAlgorithm: string, + thumbprint: string, + options: DeleteCertificateOptions = { requestOptions: {} } +): Promise { + const result = await _deleteCertificateSend( + context, + thumbprintAlgorithm, + thumbprint, + options + ); + return _deleteCertificateDeserialize(result); +} + +export function _getCertificateSend( + context: Client, + thumbprintAlgorithm: string, + thumbprint: string, + options: GetCertificateOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path( + "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", + thumbprintAlgorithm, + thumbprint + ) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut, $select: options?.$select }, + }); +} + +export async function _getCertificateDeserialize( + result: GetCertificate200Response | GetCertificateDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + thumbprint: result.body["thumbprint"], + thumbprintAlgorithm: result.body["thumbprintAlgorithm"], + url: result.body["url"], + state: result.body["state"], + stateTransitionTime: + result.body["stateTransitionTime"] !== undefined + ? 
new Date(result.body["stateTransitionTime"]) + : undefined, + previousState: result.body["previousState"], + previousStateTransitionTime: + result.body["previousStateTransitionTime"] !== undefined + ? new Date(result.body["previousStateTransitionTime"]) + : undefined, + publicData: + typeof result.body["publicData"] === "string" + ? stringToUint8Array(result.body["publicData"], "base64") + : result.body["publicData"], + deleteCertificateError: !result.body.deleteCertificateError + ? undefined + : { + code: result.body.deleteCertificateError?.["code"], + message: result.body.deleteCertificateError?.["message"], + values: (result.body.deleteCertificateError?.["values"] ?? []).map( + (p) => ({ name: p["name"], value: p["value"] }) + ), + }, + data: + typeof result.body["data"] === "string" + ? stringToUint8Array(result.body["data"], "base64") + : result.body["data"], + certificateFormat: result.body["certificateFormat"], + password: result.body["password"], + }; +} + +/** Gets information about the specified Certificate. */ +export async function getCertificate( + context: Client, + thumbprintAlgorithm: string, + thumbprint: string, + options: GetCertificateOptions = { requestOptions: {} } +): Promise { + const result = await _getCertificateSend( + context, + thumbprintAlgorithm, + thumbprint, + options + ); + return _getCertificateDeserialize(result); +} + +export function _jobScheduleExistsSend( + context: Client, + jobScheduleId: string, + options: JobScheduleExistsOptions = { requestOptions: {} } +): StreamableMethod< + | JobScheduleExists200Response + | JobScheduleExists404Response + | JobScheduleExistsDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}", jobScheduleId) + .head({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _jobScheduleExistsDeserialize( + result: + | JobScheduleExists200Response + | JobScheduleExists404Response + | JobScheduleExistsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Checks the specified Job Schedule exists. */ +export async function jobScheduleExists( + context: Client, + jobScheduleId: string, + options: JobScheduleExistsOptions = { requestOptions: {} } +): Promise { + const result = await _jobScheduleExistsSend(context, jobScheduleId, options); + return _jobScheduleExistsDeserialize(result); +} + +export function _deleteJobScheduleSend( + context: Client, + jobScheduleId: string, + options: DeleteJobScheduleOptions = { requestOptions: {} } +): StreamableMethod< + DeleteJobSchedule202Response | DeleteJobScheduleDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}", jobScheduleId) + .delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? 
{ "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _deleteJobScheduleDeserialize( + result: DeleteJobSchedule202Response | DeleteJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * When you delete a Job Schedule, this also deletes all Jobs and Tasks under that + * schedule. When Tasks are deleted, all the files in their working directories on + * the Compute Nodes are also deleted (the retention period is ignored). The Job + * Schedule statistics are no longer accessible once the Job Schedule is deleted, + * though they are still counted towards Account lifetime statistics. + */ +export async function deleteJobSchedule( + context: Client, + jobScheduleId: string, + options: DeleteJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _deleteJobScheduleSend(context, jobScheduleId, options); + return _deleteJobScheduleDeserialize(result); +} + +export function _getJobScheduleSend( + context: Client, + jobScheduleId: string, + options: GetJobScheduleOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobschedules/{jobScheduleId}", jobScheduleId) + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { + timeOut: options?.timeOut, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _getJobScheduleDeserialize( + result: GetJobSchedule200Response | GetJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + displayName: result.body["displayName"], + url: result.body["url"], + eTag: result.body["eTag"], + lastModified: + result.body["lastModified"] !== undefined + ? new Date(result.body["lastModified"]) + : undefined, + creationTime: + result.body["creationTime"] !== undefined + ? new Date(result.body["creationTime"]) + : undefined, + state: result.body["state"], + stateTransitionTime: + result.body["stateTransitionTime"] !== undefined + ? new Date(result.body["stateTransitionTime"]) + : undefined, + previousState: result.body["previousState"], + previousStateTransitionTime: + result.body["previousStateTransitionTime"] !== undefined + ? new Date(result.body["previousStateTransitionTime"]) + : undefined, + schedule: { + doNotRunUntil: + result.body.schedule["doNotRunUntil"] !== undefined + ? new Date(result.body.schedule["doNotRunUntil"]) + : undefined, + doNotRunAfter: + result.body.schedule["doNotRunAfter"] !== undefined + ? 
new Date(result.body.schedule["doNotRunAfter"]) + : undefined, + startWindow: result.body.schedule["startWindow"], + recurrenceInterval: result.body.schedule["recurrenceInterval"], + }, + jobSpecification: { + priority: result.body.jobSpecification["priority"], + allowTaskPreemption: result.body.jobSpecification["allowTaskPreemption"], + maxParallelTasks: result.body.jobSpecification["maxParallelTasks"], + displayName: result.body.jobSpecification["displayName"], + usesTaskDependencies: + result.body.jobSpecification["usesTaskDependencies"], + onAllTasksComplete: result.body.jobSpecification["onAllTasksComplete"], + onTaskFailure: result.body.jobSpecification["onTaskFailure"], + networkConfiguration: !result.body.jobSpecification.networkConfiguration + ? undefined + : { + subnetId: + result.body.jobSpecification.networkConfiguration?.["subnetId"], + }, + constraints: !result.body.jobSpecification.constraints + ? undefined + : { + maxWallClockTime: + result.body.jobSpecification.constraints?.["maxWallClockTime"], + maxTaskRetryCount: + result.body.jobSpecification.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !result.body.jobSpecification.jobManagerTask + ? undefined + : { + id: result.body.jobSpecification.jobManagerTask?.["id"], + displayName: + result.body.jobSpecification.jobManagerTask?.["displayName"], + commandLine: + result.body.jobSpecification.jobManagerTask?.["commandLine"], + containerSettings: !result.body.jobSpecification.jobManagerTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.jobSpecification.jobManagerTask + ?.containerSettings?.["containerRunOptions"], + imageName: + result.body.jobSpecification.jobManagerTask + ?.containerSettings?.["imageName"], + registry: !result.body.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ? undefined + : { + username: + result.body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.["username"], + password: + result.body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.["password"], + registryServer: + result.body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.["registryServer"], + identityReference: !result.body.jobSpecification + .jobManagerTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + result.body.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.jobSpecification.jobManagerTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + result.body.jobSpecification.jobManagerTask?.["resourceFiles"] ?? + [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: ( + result.body.jobSpecification.jobManagerTask?.["outputFiles"] ?? [] + ).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? 
undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: ( + result.body.jobSpecification.jobManagerTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !result.body.jobSpecification.jobManagerTask + ?.constraints + ? undefined + : { + maxWallClockTime: + result.body.jobSpecification.jobManagerTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + result.body.jobSpecification.jobManagerTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + result.body.jobSpecification.jobManagerTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + requiredSlots: + result.body.jobSpecification.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: + result.body.jobSpecification.jobManagerTask?.[ + "killJobOnCompletion" + ], + userIdentity: !result.body.jobSpecification.jobManagerTask + ?.userIdentity + ? undefined + : { + username: + result.body.jobSpecification.jobManagerTask?.userIdentity?.[ + "username" + ], + autoUser: !result.body.jobSpecification.jobManagerTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.jobSpecification.jobManagerTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + result.body.jobSpecification.jobManagerTask + ?.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + runExclusive: + result.body.jobSpecification.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + result.body.jobSpecification.jobManagerTask?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !result.body.jobSpecification + .jobManagerTask?.authenticationTokenSettings + ? undefined + : { + access: + result.body.jobSpecification.jobManagerTask + ?.authenticationTokenSettings?.["access"], + }, + allowLowPriorityNode: + result.body.jobSpecification.jobManagerTask?.[ + "allowLowPriorityNode" + ], + }, + jobPreparationTask: !result.body.jobSpecification.jobPreparationTask + ? undefined + : { + id: result.body.jobSpecification.jobPreparationTask?.["id"], + commandLine: + result.body.jobSpecification.jobPreparationTask?.["commandLine"], + containerSettings: !result.body.jobSpecification.jobPreparationTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.["containerRunOptions"], + imageName: + result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.["imageName"], + registry: !result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ? undefined + : { + username: + result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["username"], + password: + result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["password"], + registryServer: + result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["registryServer"], + identityReference: !result.body.jobSpecification + .jobPreparationTask?.containerSettings?.registry + ?.identityReference + ? 
undefined + : { + resourceId: + result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.jobSpecification.jobPreparationTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + result.body.jobSpecification.jobPreparationTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + result.body.jobSpecification.jobPreparationTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !result.body.jobSpecification.jobPreparationTask + ?.constraints + ? undefined + : { + maxWallClockTime: + result.body.jobSpecification.jobPreparationTask + ?.constraints?.["maxWallClockTime"], + retentionTime: + result.body.jobSpecification.jobPreparationTask + ?.constraints?.["retentionTime"], + maxTaskRetryCount: + result.body.jobSpecification.jobPreparationTask + ?.constraints?.["maxTaskRetryCount"], + }, + waitForSuccess: + result.body.jobSpecification.jobPreparationTask?.[ + "waitForSuccess" + ], + userIdentity: !result.body.jobSpecification.jobPreparationTask + ?.userIdentity + ? undefined + : { + username: + result.body.jobSpecification.jobPreparationTask + ?.userIdentity?.["username"], + autoUser: !result.body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + result.body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + rerunOnNodeRebootAfterSuccess: + result.body.jobSpecification.jobPreparationTask?.[ + "rerunOnNodeRebootAfterSuccess" + ], + }, + jobReleaseTask: !result.body.jobSpecification.jobReleaseTask + ? undefined + : { + id: result.body.jobSpecification.jobReleaseTask?.["id"], + commandLine: + result.body.jobSpecification.jobReleaseTask?.["commandLine"], + containerSettings: !result.body.jobSpecification.jobReleaseTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.["containerRunOptions"], + imageName: + result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.["imageName"], + registry: !result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ? undefined + : { + username: + result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.["username"], + password: + result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.["password"], + registryServer: + result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.["registryServer"], + identityReference: !result.body.jobSpecification + .jobReleaseTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.jobSpecification.jobReleaseTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + result.body.jobSpecification.jobReleaseTask?.["resourceFiles"] ?? 
+ [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + result.body.jobSpecification.jobReleaseTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: + result.body.jobSpecification.jobReleaseTask?.["maxWallClockTime"], + retentionTime: + result.body.jobSpecification.jobReleaseTask?.["retentionTime"], + userIdentity: !result.body.jobSpecification.jobReleaseTask + ?.userIdentity + ? undefined + : { + username: + result.body.jobSpecification.jobReleaseTask?.userIdentity?.[ + "username" + ], + autoUser: !result.body.jobSpecification.jobReleaseTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.jobSpecification.jobReleaseTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + result.body.jobSpecification.jobReleaseTask + ?.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + }, + commonEnvironmentSettings: ( + result.body.jobSpecification["commonEnvironmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + poolInfo: { + poolId: result.body.jobSpecification.poolInfo["poolId"], + autoPoolSpecification: !result.body.jobSpecification.poolInfo + .autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + result.body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "autoPoolIdPrefix" + ], + poolLifetimeOption: + result.body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "poolLifetimeOption" + ], + keepAlive: + result.body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "keepAlive" + ], + pool: !result.body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool + ? undefined + : { + displayName: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["displayName"], + vmSize: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["vmSize"], + cloudServiceConfiguration: !result.body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration + ? undefined + : { + osFamily: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !result.body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? 
undefined + : { + imageReference: { + publisher: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "offer" + ], + sku: result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + exactVersion: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "exactVersion" + ], + }, + nodeAgentSKUId: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["nodeAgentSKUId"], + windowsConfiguration: !result.body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["dataDisks"] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["licenseType"], + containerConfiguration: !result.body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + }, + diskEncryptionConfiguration: !result.body + .jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.["targets"], + }, + nodePlacementConfiguration: !result.body + .jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? 
undefined + : { + policy: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.["extensions"] ?? + [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !result.body + .jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["taskSlotsPerNode"], + taskSchedulingPolicy: !result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["resizeTimeout"], + targetDedicatedNodes: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["targetDedicatedNodes"], + targetLowPriorityNodes: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["enableAutoScale"], + autoScaleFormula: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["autoScaleFormula"], + autoScaleEvaluationInterval: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ? undefined + : { + subnetId: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !result.body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? 
[] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !result.body + .jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ? undefined + : { + commandLine: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "commandLine" + ], + containerSettings: !result.body.jobSpecification + .poolInfo.autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.["imageName"], + registry: !result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ? undefined + : { + username: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !result.body + .jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + result.body.jobSpecification + .poolInfo.autoPoolSpecification + ?.pool?.startTask + ?.containerSettings?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity + ? 
undefined + : { + username: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.["username"], + autoUser: !result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "maxTaskRetryCount" + ], + waitForSuccess: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "waitForSuccess" + ], + }, + certificateReferences: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["applicationLicenses"], + userAccounts: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["userAccounts"] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["metadata"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.["mountConfiguration"] ?? + [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.["sasKey"], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? 
undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.["accountName"], + azureFileUrl: + p.azureFileShareConfiguration?.["azureFileUrl"], + accountKey: + p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.["mountOptions"], + }, + })), + targetNodeCommunicationMode: + result.body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + metadata: (result.body.jobSpecification["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + executionInfo: !result.body.executionInfo + ? undefined + : { + nextRunTime: + result.body.executionInfo?.["nextRunTime"] !== undefined + ? new Date(result.body.executionInfo?.["nextRunTime"]) + : undefined, + recentJob: !result.body.executionInfo?.recentJob + ? undefined + : { + id: result.body.executionInfo?.recentJob?.["id"], + url: result.body.executionInfo?.recentJob?.["url"], + }, + endTime: + result.body.executionInfo?.["endTime"] !== undefined + ? new Date(result.body.executionInfo?.["endTime"]) + : undefined, + }, + metadata: (result.body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + stats: !result.body.stats + ? undefined + : { + url: result.body.stats?.["url"], + startTime: new Date(result.body.stats?.["startTime"]), + lastUpdateTime: new Date(result.body.stats?.["lastUpdateTime"]), + userCPUTime: result.body.stats?.["userCPUTime"], + kernelCPUTime: result.body.stats?.["kernelCPUTime"], + wallClockTime: result.body.stats?.["wallClockTime"], + readIOps: result.body.stats?.["readIOps"], + writeIOps: result.body.stats?.["writeIOps"], + readIOGiB: result.body.stats?.["readIOGiB"], + writeIOGiB: result.body.stats?.["writeIOGiB"], + numSucceededTasks: result.body.stats?.["numSucceededTasks"], + numFailedTasks: result.body.stats?.["numFailedTasks"], + numTaskRetries: result.body.stats?.["numTaskRetries"], + waitTime: result.body.stats?.["waitTime"], + }, + }; +} + +/** Gets information about the specified Job Schedule. */ +export async function getJobSchedule( + context: Client, + jobScheduleId: string, + options: GetJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _getJobScheduleSend(context, jobScheduleId, options); + return _getJobScheduleDeserialize(result); +} + +export function _updateJobScheduleSend( + context: Client, + jobScheduleId: string, + body: BatchJobScheduleUpdateOptions, + options: UpdateJobScheduleOptions = { requestOptions: {} } +): StreamableMethod< + UpdateJobSchedule200Response | UpdateJobScheduleDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}", jobScheduleId) + .patch({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? 
{ "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + schedule: !body.schedule + ? undefined + : { + doNotRunUntil: body.schedule?.["doNotRunUntil"]?.toISOString(), + doNotRunAfter: body.schedule?.["doNotRunAfter"]?.toISOString(), + startWindow: body.schedule?.["startWindow"], + recurrenceInterval: body.schedule?.["recurrenceInterval"], + }, + jobSpecification: !body.jobSpecification + ? undefined + : { + priority: body.jobSpecification?.["priority"], + allowTaskPreemption: + body.jobSpecification?.["allowTaskPreemption"], + maxParallelTasks: body.jobSpecification?.["maxParallelTasks"], + displayName: body.jobSpecification?.["displayName"], + usesTaskDependencies: + body.jobSpecification?.["usesTaskDependencies"], + onAllTasksComplete: body.jobSpecification?.["onAllTasksComplete"], + onTaskFailure: body.jobSpecification?.["onTaskFailure"], + networkConfiguration: !body.jobSpecification?.networkConfiguration + ? undefined + : { + subnetId: + body.jobSpecification?.networkConfiguration?.["subnetId"], + }, + constraints: !body.jobSpecification?.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification?.constraints?.["maxWallClockTime"], + maxTaskRetryCount: + body.jobSpecification?.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !body.jobSpecification?.jobManagerTask + ? undefined + : { + id: body.jobSpecification?.jobManagerTask?.["id"], + displayName: + body.jobSpecification?.jobManagerTask?.["displayName"], + commandLine: + body.jobSpecification?.jobManagerTask?.["commandLine"], + containerSettings: !body.jobSpecification?.jobManagerTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification?.jobManagerTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification?.jobManagerTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification?.jobManagerTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification?.jobManagerTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification?.jobManagerTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification?.jobManagerTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + ?.jobManagerTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification?.jobManagerTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification?.jobManagerTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification?.jobManagerTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: ( + body.jobSpecification?.jobManagerTask?.["outputFiles"] ?? + [] + ).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? 
undefined + : { + path: p.destination.container?.["path"], + containerUrl: + p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? undefined + : { + resourceId: + p.destination.container + ?.identityReference?.["resourceId"], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: ( + body.jobSpecification?.jobManagerTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobSpecification?.jobManagerTask + ?.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification?.jobManagerTask + ?.constraints?.["maxWallClockTime"], + retentionTime: + body.jobSpecification?.jobManagerTask + ?.constraints?.["retentionTime"], + maxTaskRetryCount: + body.jobSpecification?.jobManagerTask + ?.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: + body.jobSpecification?.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: + body.jobSpecification?.jobManagerTask?.[ + "killJobOnCompletion" + ], + userIdentity: !body.jobSpecification?.jobManagerTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification?.jobManagerTask + ?.userIdentity?.["username"], + autoUser: !body.jobSpecification?.jobManagerTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification?.jobManagerTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification?.jobManagerTask + ?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + runExclusive: + body.jobSpecification?.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + body.jobSpecification?.jobManagerTask?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !body.jobSpecification + ?.jobManagerTask?.authenticationTokenSettings + ? undefined + : { + access: + body.jobSpecification?.jobManagerTask + ?.authenticationTokenSettings?.["access"], + }, + allowLowPriorityNode: + body.jobSpecification?.jobManagerTask?.[ + "allowLowPriorityNode" + ], + }, + jobPreparationTask: !body.jobSpecification?.jobPreparationTask + ? undefined + : { + id: body.jobSpecification?.jobPreparationTask?.["id"], + commandLine: + body.jobSpecification?.jobPreparationTask?.[ + "commandLine" + ], + containerSettings: !body.jobSpecification + ?.jobPreparationTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification?.jobPreparationTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification?.jobPreparationTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification?.jobPreparationTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification?.jobPreparationTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification?.jobPreparationTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification?.jobPreparationTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + ?.jobPreparationTask?.containerSettings + ?.registry?.identityReference + ? 
undefined + : { + resourceId: + body.jobSpecification + ?.jobPreparationTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification?.jobPreparationTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification?.jobPreparationTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + body.jobSpecification?.jobPreparationTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobSpecification?.jobPreparationTask + ?.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification?.jobPreparationTask + ?.constraints?.["maxWallClockTime"], + retentionTime: + body.jobSpecification?.jobPreparationTask + ?.constraints?.["retentionTime"], + maxTaskRetryCount: + body.jobSpecification?.jobPreparationTask + ?.constraints?.["maxTaskRetryCount"], + }, + waitForSuccess: + body.jobSpecification?.jobPreparationTask?.[ + "waitForSuccess" + ], + userIdentity: !body.jobSpecification?.jobPreparationTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification?.jobPreparationTask + ?.userIdentity?.["username"], + autoUser: !body.jobSpecification?.jobPreparationTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification?.jobPreparationTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification?.jobPreparationTask + ?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + rerunOnNodeRebootAfterSuccess: + body.jobSpecification?.jobPreparationTask?.[ + "rerunOnNodeRebootAfterSuccess" + ], + }, + jobReleaseTask: !body.jobSpecification?.jobReleaseTask + ? undefined + : { + id: body.jobSpecification?.jobReleaseTask?.["id"], + commandLine: + body.jobSpecification?.jobReleaseTask?.["commandLine"], + containerSettings: !body.jobSpecification?.jobReleaseTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification?.jobReleaseTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification?.jobReleaseTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification?.jobReleaseTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification?.jobReleaseTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification?.jobReleaseTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification?.jobReleaseTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + ?.jobReleaseTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification?.jobReleaseTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification?.jobReleaseTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification?.jobReleaseTask?.[ + "resourceFiles" + ] ?? 
[] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + body.jobSpecification?.jobReleaseTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: + body.jobSpecification?.jobReleaseTask?.[ + "maxWallClockTime" + ], + retentionTime: + body.jobSpecification?.jobReleaseTask?.["retentionTime"], + userIdentity: !body.jobSpecification?.jobReleaseTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification?.jobReleaseTask + ?.userIdentity?.["username"], + autoUser: !body.jobSpecification?.jobReleaseTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification?.jobReleaseTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification?.jobReleaseTask + ?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + }, + commonEnvironmentSettings: ( + body.jobSpecification?.["commonEnvironmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + poolInfo: { + poolId: body.jobSpecification?.poolInfo["poolId"], + autoPoolSpecification: !body.jobSpecification?.poolInfo + .autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + body.jobSpecification?.poolInfo.autoPoolSpecification?.[ + "autoPoolIdPrefix" + ], + poolLifetimeOption: + body.jobSpecification?.poolInfo.autoPoolSpecification?.[ + "poolLifetimeOption" + ], + keepAlive: + body.jobSpecification?.poolInfo.autoPoolSpecification?.[ + "keepAlive" + ], + pool: !body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ? undefined + : { + displayName: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.["displayName"], + vmSize: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.["vmSize"], + cloudServiceConfiguration: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration + ? undefined + : { + osFamily: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.[ + "osVersion" + ], + }, + virtualMachineConfiguration: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? 
undefined + : { + imageReference: { + publisher: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["publisher"], + offer: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["offer"], + sku: body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["sku"], + version: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["version"], + virtualMachineImageId: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference[ + "virtualMachineImageId" + ], + }, + nodeAgentSKUId: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "nodeAgentSKUId" + ], + windowsConfiguration: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "dataDisks" + ] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "licenseType" + ], + containerConfiguration: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: + !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.[ + "resourceId" + ], + }, + })), + }, + diskEncryptionConfiguration: !body + .jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.[ + "targets" + ], + }, + nodePlacementConfiguration: !body + .jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.[ + "policy" + ], + }, + extensions: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "extensions" + ] ?? 
[] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !body + .jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.osDisk + ?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "taskSlotsPerNode" + ], + taskSchedulingPolicy: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.["resizeTimeout"], + targetDedicatedNodes: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "targetDedicatedNodes" + ], + targetLowPriorityNodes: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "targetLowPriorityNodes" + ], + enableAutoScale: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "enableAutoScale" + ], + autoScaleFormula: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "autoScaleFormula" + ], + autoScaleEvaluationInterval: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "autoScaleEvaluationInterval" + ], + enableInterNodeCommunication: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "enableInterNodeCommunication" + ], + networkConfiguration: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ? undefined + : { + subnetId: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: + p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !body + .jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration + ? 
undefined + : { + provision: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.startTask + ? undefined + : { + commandLine: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "commandLine" + ], + containerSettings: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "imageName" + ], + registry: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry + ? undefined + : { + username: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.[ + "registryServer" + ], + identityReference: !body + .jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification + ?.poolInfo + .autoPoolSpecification + ?.pool?.startTask + ?.containerSettings + ?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: + p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity?.[ + "username" + ], + autoUser: !body.jobSpecification + ?.poolInfo.autoPoolSpecification?.pool + ?.startTask?.userIdentity?.autoUser + ? 
undefined + : { + scope: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "maxTaskRetryCount" + ], + waitForSuccess: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "waitForSuccess" + ], + }, + certificateReferences: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "certificateReferences" + ] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "applicationLicenses" + ], + userAccounts: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "userAccounts" + ] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.[ + "sshPrivateKey" + ], + }, + windowsUserConfiguration: + !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.[ + "loginMode" + ], + }, + })), + metadata: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.["metadata"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + mountConfiguration: ( + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "mountConfiguration" + ] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.[ + "sasKey" + ], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.[ + "resourceId" + ], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? 
undefined + : { + username: + p.cifsMountConfiguration?.["username"], + source: + p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.cifsMountConfiguration?.[ + "mountOptions" + ], + password: + p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.[ + "accountName" + ], + azureFileUrl: + p.azureFileShareConfiguration?.[ + "azureFileUrl" + ], + accountKey: + p.azureFileShareConfiguration?.[ + "accountKey" + ], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.[ + "mountOptions" + ], + }, + })), + targetNodeCommunicationMode: + body.jobSpecification?.poolInfo + .autoPoolSpecification?.pool?.[ + "targetNodeCommunicationMode" + ], + }, + }, + }, + metadata: (body.jobSpecification?.["metadata"] ?? []).map( + (p) => ({ name: p["name"], value: p["value"] }) + ), + }, + metadata: (body["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }); +} + +export async function _updateJobScheduleDeserialize( + result: UpdateJobSchedule200Response | UpdateJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This replaces only the Job Schedule properties specified in the request. For + * example, if the schedule property is not specified with this request, then the + * Batch service will keep the existing schedule. Changes to a Job Schedule only + * impact Jobs created by the schedule after the update has taken place; currently + * running Jobs are unaffected. + */ +export async function updateJobSchedule( + context: Client, + jobScheduleId: string, + body: BatchJobScheduleUpdateOptions, + options: UpdateJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _updateJobScheduleSend( + context, + jobScheduleId, + body, + options + ); + return _updateJobScheduleDeserialize(result); +} + +export function _replaceJobScheduleSend( + context: Client, + jobScheduleId: string, + body: BatchJobSchedule, + options: ReplaceJobScheduleOptions = { requestOptions: {} } +): StreamableMethod< + ReplaceJobSchedule200Response | ReplaceJobScheduleDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}", jobScheduleId) + .put({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? 
{ "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + schedule: { + doNotRunUntil: body.schedule["doNotRunUntil"]?.toISOString(), + doNotRunAfter: body.schedule["doNotRunAfter"]?.toISOString(), + startWindow: body.schedule["startWindow"], + recurrenceInterval: body.schedule["recurrenceInterval"], + }, + jobSpecification: { + priority: body.jobSpecification["priority"], + allowTaskPreemption: body.jobSpecification["allowTaskPreemption"], + maxParallelTasks: body.jobSpecification["maxParallelTasks"], + displayName: body.jobSpecification["displayName"], + usesTaskDependencies: body.jobSpecification["usesTaskDependencies"], + onAllTasksComplete: body.jobSpecification["onAllTasksComplete"], + onTaskFailure: body.jobSpecification["onTaskFailure"], + networkConfiguration: !body.jobSpecification.networkConfiguration + ? undefined + : { + subnetId: + body.jobSpecification.networkConfiguration?.["subnetId"], + }, + constraints: !body.jobSpecification.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification.constraints?.["maxWallClockTime"], + maxTaskRetryCount: + body.jobSpecification.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !body.jobSpecification.jobManagerTask + ? undefined + : { + id: body.jobSpecification.jobManagerTask?.["id"], + displayName: + body.jobSpecification.jobManagerTask?.["displayName"], + commandLine: + body.jobSpecification.jobManagerTask?.["commandLine"], + containerSettings: !body.jobSpecification.jobManagerTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification.jobManagerTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification.jobManagerTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + .jobManagerTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification.jobManagerTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification.jobManagerTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: ( + body.jobSpecification.jobManagerTask?.["outputFiles"] ?? [] + ).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: + p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? 
undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: ( + body.jobSpecification.jobManagerTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobSpecification.jobManagerTask?.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification.jobManagerTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + body.jobSpecification.jobManagerTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + body.jobSpecification.jobManagerTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + requiredSlots: + body.jobSpecification.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: + body.jobSpecification.jobManagerTask?.["killJobOnCompletion"], + userIdentity: !body.jobSpecification.jobManagerTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.jobManagerTask?.userIdentity?.[ + "username" + ], + autoUser: !body.jobSpecification.jobManagerTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification.jobManagerTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.jobManagerTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + runExclusive: + body.jobSpecification.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + body.jobSpecification.jobManagerTask?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !body.jobSpecification + .jobManagerTask?.authenticationTokenSettings + ? undefined + : { + access: + body.jobSpecification.jobManagerTask + ?.authenticationTokenSettings?.["access"], + }, + allowLowPriorityNode: + body.jobSpecification.jobManagerTask?.[ + "allowLowPriorityNode" + ], + }, + jobPreparationTask: !body.jobSpecification.jobPreparationTask + ? undefined + : { + id: body.jobSpecification.jobPreparationTask?.["id"], + commandLine: + body.jobSpecification.jobPreparationTask?.["commandLine"], + containerSettings: !body.jobSpecification.jobPreparationTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + .jobPreparationTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification.jobPreparationTask?.["resourceFiles"] ?? 
+ [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + body.jobSpecification.jobPreparationTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobSpecification.jobPreparationTask + ?.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification.jobPreparationTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + body.jobSpecification.jobPreparationTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + body.jobSpecification.jobPreparationTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + waitForSuccess: + body.jobSpecification.jobPreparationTask?.["waitForSuccess"], + userIdentity: !body.jobSpecification.jobPreparationTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.jobPreparationTask + ?.userIdentity?.["username"], + autoUser: !body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + rerunOnNodeRebootAfterSuccess: + body.jobSpecification.jobPreparationTask?.[ + "rerunOnNodeRebootAfterSuccess" + ], + }, + jobReleaseTask: !body.jobSpecification.jobReleaseTask + ? undefined + : { + id: body.jobSpecification.jobReleaseTask?.["id"], + commandLine: + body.jobSpecification.jobReleaseTask?.["commandLine"], + containerSettings: !body.jobSpecification.jobReleaseTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + .jobReleaseTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification.jobReleaseTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + body.jobSpecification.jobReleaseTask?.[ + "environmentSettings" + ] ?? 
[] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: + body.jobSpecification.jobReleaseTask?.["maxWallClockTime"], + retentionTime: + body.jobSpecification.jobReleaseTask?.["retentionTime"], + userIdentity: !body.jobSpecification.jobReleaseTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.jobReleaseTask?.userIdentity?.[ + "username" + ], + autoUser: !body.jobSpecification.jobReleaseTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification.jobReleaseTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.jobReleaseTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + }, + commonEnvironmentSettings: ( + body.jobSpecification["commonEnvironmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + poolInfo: { + poolId: body.jobSpecification.poolInfo["poolId"], + autoPoolSpecification: !body.jobSpecification.poolInfo + .autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "autoPoolIdPrefix" + ], + poolLifetimeOption: + body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "poolLifetimeOption" + ], + keepAlive: + body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "keepAlive" + ], + pool: !body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool + ? undefined + : { + displayName: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["displayName"], + vmSize: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["vmSize"], + cloudServiceConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration + ? undefined + : { + osFamily: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["publisher"], + offer: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["offer"], + sku: body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["version"], + virtualMachineImageId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["virtualMachineImageId"], + }, + nodeAgentSKUId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "nodeAgentSKUId" + ], + windowsConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "dataDisks" + ] ?? 
[] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "licenseType" + ], + containerConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.[ + "resourceId" + ], + }, + })), + }, + diskEncryptionConfiguration: !body + .jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.[ + "targets" + ], + }, + nodePlacementConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.[ + "policy" + ], + }, + extensions: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "extensions" + ] ?? [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !body + .jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.osDisk + ?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["taskSlotsPerNode"], + taskSchedulingPolicy: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.taskSchedulingPolicy + ? 
undefined + : { + nodeFillType: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["resizeTimeout"], + targetDedicatedNodes: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetDedicatedNodes"], + targetLowPriorityNodes: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetLowPriorityNodes"], + enableAutoScale: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["enableAutoScale"], + autoScaleFormula: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["autoScaleFormula"], + autoScaleEvaluationInterval: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["autoScaleEvaluationInterval"], + enableInterNodeCommunication: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["enableInterNodeCommunication"], + networkConfiguration: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ? undefined + : { + subnetId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !body + .jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ? undefined + : { + commandLine: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "commandLine" + ], + containerSettings: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? 
undefined + : { + containerRunOptions: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !body + .jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask + ?.containerSettings + ?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.["username"], + autoUser: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + maxTaskRetryCount: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "maxTaskRetryCount" + ], + waitForSuccess: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "waitForSuccess" + ], + }, + certificateReferences: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["certificateReferences"] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["applicationPackageReferences"] ?? 
[] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["applicationLicenses"], + userAccounts: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["userAccounts"] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["metadata"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["mountConfiguration"] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.[ + "sasKey" + ], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: + p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: + p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.[ + "accountName" + ], + azureFileUrl: + p.azureFileShareConfiguration?.[ + "azureFileUrl" + ], + accountKey: + p.azureFileShareConfiguration?.[ + "accountKey" + ], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.[ + "mountOptions" + ], + }, + })), + targetNodeCommunicationMode: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetNodeCommunicationMode"], + }, + }, + }, + metadata: (body.jobSpecification["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + metadata: (body["metadata"] ?? 
[]).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }); +} + +export async function _replaceJobScheduleDeserialize( + result: ReplaceJobSchedule200Response | ReplaceJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This fully replaces all the updatable properties of the Job Schedule. For + * example, if the schedule property is not specified with this request, then the + * Batch service will remove the existing schedule. Changes to a Job Schedule only + * impact Jobs created by the schedule after the update has taken place; currently + * running Jobs are unaffected. + */ +export async function replaceJobSchedule( + context: Client, + jobScheduleId: string, + body: BatchJobSchedule, + options: ReplaceJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _replaceJobScheduleSend( + context, + jobScheduleId, + body, + options + ); + return _replaceJobScheduleDeserialize(result); +} + +export function _disableJobScheduleSend( + context: Client, + jobScheduleId: string, + options: DisableJobScheduleOptions = { requestOptions: {} } +): StreamableMethod< + DisableJobSchedule204Response | DisableJobScheduleDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}/disable", jobScheduleId) + .post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _disableJobScheduleDeserialize( + result: DisableJobSchedule204Response | DisableJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** No new Jobs will be created until the Job Schedule is enabled again. */ +export async function disableJobSchedule( + context: Client, + jobScheduleId: string, + options: DisableJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _disableJobScheduleSend(context, jobScheduleId, options); + return _disableJobScheduleDeserialize(result); +} + +export function _enableJobScheduleSend( + context: Client, + jobScheduleId: string, + options: EnableJobScheduleOptions = { requestOptions: {} } +): StreamableMethod< + EnableJobSchedule204Response | EnableJobScheduleDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}/enable", jobScheduleId) + .post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? 
{ "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _enableJobScheduleDeserialize( + result: EnableJobSchedule204Response | EnableJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Enables a Job Schedule. */ +export async function enableJobSchedule( + context: Client, + jobScheduleId: string, + options: EnableJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _enableJobScheduleSend(context, jobScheduleId, options); + return _enableJobScheduleDeserialize(result); +} + +export function _terminateJobScheduleSend( + context: Client, + jobScheduleId: string, + options: TerminateJobScheduleOptions = { requestOptions: {} } +): StreamableMethod< + TerminateJobSchedule202Response | TerminateJobScheduleDefaultResponse +> { + return context + .path("/jobschedules/{jobScheduleId}/terminate", jobScheduleId) + .post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _terminateJobScheduleDeserialize( + result: TerminateJobSchedule202Response | TerminateJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Terminates a Job Schedule. */ +export async function terminateJobSchedule( + context: Client, + jobScheduleId: string, + options: TerminateJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _terminateJobScheduleSend( + context, + jobScheduleId, + options + ); + return _terminateJobScheduleDeserialize(result); +} + +export function _createJobScheduleSend( + context: Client, + body: BatchJobScheduleCreateOptions, + options: CreateJobScheduleOptions = { requestOptions: {} } +): StreamableMethod< + CreateJobSchedule201Response | CreateJobScheduleDefaultResponse +> { + return context + .path("/jobschedules") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + id: body["id"], + displayName: body["displayName"], + schedule: { + doNotRunUntil: body.schedule["doNotRunUntil"]?.toISOString(), + doNotRunAfter: body.schedule["doNotRunAfter"]?.toISOString(), + startWindow: body.schedule["startWindow"], + recurrenceInterval: body.schedule["recurrenceInterval"], + }, + jobSpecification: { + priority: body.jobSpecification["priority"], + allowTaskPreemption: body.jobSpecification["allowTaskPreemption"], + maxParallelTasks: body.jobSpecification["maxParallelTasks"], + displayName: body.jobSpecification["displayName"], + usesTaskDependencies: body.jobSpecification["usesTaskDependencies"], + onAllTasksComplete: body.jobSpecification["onAllTasksComplete"], + onTaskFailure: body.jobSpecification["onTaskFailure"], + networkConfiguration: !body.jobSpecification.networkConfiguration + ? 
undefined + : { + subnetId: + body.jobSpecification.networkConfiguration?.["subnetId"], + }, + constraints: !body.jobSpecification.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification.constraints?.["maxWallClockTime"], + maxTaskRetryCount: + body.jobSpecification.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !body.jobSpecification.jobManagerTask + ? undefined + : { + id: body.jobSpecification.jobManagerTask?.["id"], + displayName: + body.jobSpecification.jobManagerTask?.["displayName"], + commandLine: + body.jobSpecification.jobManagerTask?.["commandLine"], + containerSettings: !body.jobSpecification.jobManagerTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification.jobManagerTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification.jobManagerTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + .jobManagerTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification.jobManagerTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification.jobManagerTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: ( + body.jobSpecification.jobManagerTask?.["outputFiles"] ?? [] + ).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: + p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: ( + body.jobSpecification.jobManagerTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobSpecification.jobManagerTask?.constraints + ? 
undefined + : { + maxWallClockTime: + body.jobSpecification.jobManagerTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + body.jobSpecification.jobManagerTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + body.jobSpecification.jobManagerTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + requiredSlots: + body.jobSpecification.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: + body.jobSpecification.jobManagerTask?.["killJobOnCompletion"], + userIdentity: !body.jobSpecification.jobManagerTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.jobManagerTask?.userIdentity?.[ + "username" + ], + autoUser: !body.jobSpecification.jobManagerTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification.jobManagerTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.jobManagerTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + runExclusive: + body.jobSpecification.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + body.jobSpecification.jobManagerTask?.[ + "applicationPackageReferences" + ] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !body.jobSpecification + .jobManagerTask?.authenticationTokenSettings + ? undefined + : { + access: + body.jobSpecification.jobManagerTask + ?.authenticationTokenSettings?.["access"], + }, + allowLowPriorityNode: + body.jobSpecification.jobManagerTask?.[ + "allowLowPriorityNode" + ], + }, + jobPreparationTask: !body.jobSpecification.jobPreparationTask + ? undefined + : { + id: body.jobSpecification.jobPreparationTask?.["id"], + commandLine: + body.jobSpecification.jobPreparationTask?.["commandLine"], + containerSettings: !body.jobSpecification.jobPreparationTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + .jobPreparationTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification.jobPreparationTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification.jobPreparationTask?.["resourceFiles"] ?? + [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + body.jobSpecification.jobPreparationTask?.[ + "environmentSettings" + ] ?? 
[] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !body.jobSpecification.jobPreparationTask + ?.constraints + ? undefined + : { + maxWallClockTime: + body.jobSpecification.jobPreparationTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + body.jobSpecification.jobPreparationTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + body.jobSpecification.jobPreparationTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + waitForSuccess: + body.jobSpecification.jobPreparationTask?.["waitForSuccess"], + userIdentity: !body.jobSpecification.jobPreparationTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.jobPreparationTask + ?.userIdentity?.["username"], + autoUser: !body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + rerunOnNodeRebootAfterSuccess: + body.jobSpecification.jobPreparationTask?.[ + "rerunOnNodeRebootAfterSuccess" + ], + }, + jobReleaseTask: !body.jobSpecification.jobReleaseTask + ? undefined + : { + id: body.jobSpecification.jobReleaseTask?.["id"], + commandLine: + body.jobSpecification.jobReleaseTask?.["commandLine"], + containerSettings: !body.jobSpecification.jobReleaseTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.["containerRunOptions"], + imageName: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.["username"], + password: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.["password"], + registryServer: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !body.jobSpecification + .jobReleaseTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + body.jobSpecification.jobReleaseTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + body.jobSpecification.jobReleaseTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + body.jobSpecification.jobReleaseTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: + body.jobSpecification.jobReleaseTask?.["maxWallClockTime"], + retentionTime: + body.jobSpecification.jobReleaseTask?.["retentionTime"], + userIdentity: !body.jobSpecification.jobReleaseTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.jobReleaseTask?.userIdentity?.[ + "username" + ], + autoUser: !body.jobSpecification.jobReleaseTask + ?.userIdentity?.autoUser + ? 
undefined + : { + scope: + body.jobSpecification.jobReleaseTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.jobReleaseTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + }, + commonEnvironmentSettings: ( + body.jobSpecification["commonEnvironmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + poolInfo: { + poolId: body.jobSpecification.poolInfo["poolId"], + autoPoolSpecification: !body.jobSpecification.poolInfo + .autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "autoPoolIdPrefix" + ], + poolLifetimeOption: + body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "poolLifetimeOption" + ], + keepAlive: + body.jobSpecification.poolInfo.autoPoolSpecification?.[ + "keepAlive" + ], + pool: !body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool + ? undefined + : { + displayName: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["displayName"], + vmSize: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["vmSize"], + cloudServiceConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.cloudServiceConfiguration + ? undefined + : { + osFamily: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osFamily"], + osVersion: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.cloudServiceConfiguration?.["osVersion"], + }, + virtualMachineConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["publisher"], + offer: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["offer"], + sku: body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["version"], + virtualMachineImageId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.imageReference["virtualMachineImageId"], + }, + nodeAgentSKUId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "nodeAgentSKUId" + ], + windowsConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "dataDisks" + ] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "licenseType" + ], + containerConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? 
undefined + : { + type: body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.[ + "resourceId" + ], + }, + })), + }, + diskEncryptionConfiguration: !body + .jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? undefined + : { + targets: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.[ + "targets" + ], + }, + nodePlacementConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.[ + "policy" + ], + }, + extensions: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.[ + "extensions" + ] ?? [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !body + .jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.osDisk + ?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["taskSlotsPerNode"], + taskSchedulingPolicy: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.taskSchedulingPolicy + ? 
undefined + : { + nodeFillType: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["resizeTimeout"], + targetDedicatedNodes: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetDedicatedNodes"], + targetLowPriorityNodes: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetLowPriorityNodes"], + enableAutoScale: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["enableAutoScale"], + autoScaleFormula: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["autoScaleFormula"], + autoScaleEvaluationInterval: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["autoScaleEvaluationInterval"], + enableInterNodeCommunication: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["enableInterNodeCommunication"], + networkConfiguration: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ? undefined + : { + subnetId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !body.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !body + .jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ? undefined + : { + commandLine: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "commandLine" + ], + containerSettings: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? 
undefined + : { + containerRunOptions: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.["imageName"], + registry: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ? undefined + : { + username: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !body + .jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask + ?.containerSettings + ?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "resourceFiles" + ] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity + ? undefined + : { + username: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.["username"], + autoUser: !body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + maxTaskRetryCount: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "maxTaskRetryCount" + ], + waitForSuccess: + body.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask?.[ + "waitForSuccess" + ], + }, + certificateReferences: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["certificateReferences"] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["applicationPackageReferences"] ?? 
[] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["applicationLicenses"], + userAccounts: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["userAccounts"] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["metadata"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["mountConfiguration"] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.[ + "sasKey" + ], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? undefined + : { + username: + p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: + p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.[ + "accountName" + ], + azureFileUrl: + p.azureFileShareConfiguration?.[ + "azureFileUrl" + ], + accountKey: + p.azureFileShareConfiguration?.[ + "accountKey" + ], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.[ + "mountOptions" + ], + }, + })), + targetNodeCommunicationMode: + body.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetNodeCommunicationMode"], + }, + }, + }, + metadata: (body.jobSpecification["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + metadata: (body["metadata"] ?? 
[]).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + }); +} + +export async function _createJobScheduleDeserialize( + result: CreateJobSchedule201Response | CreateJobScheduleDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Creates a Job Schedule to the specified Account. */ +export async function createJobSchedule( + context: Client, + body: BatchJobScheduleCreateOptions, + options: CreateJobScheduleOptions = { requestOptions: {} } +): Promise { + const result = await _createJobScheduleSend(context, body, options); + return _createJobScheduleDeserialize(result); +} + +export function _listJobSchedulesSend( + context: Client, + options: ListJobSchedulesOptions = { requestOptions: {} } +): StreamableMethod< + ListJobSchedules200Response | ListJobSchedulesDefaultResponse +> { + return context + .path("/jobschedules") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _listJobSchedulesDeserialize( + result: ListJobSchedules200Response | ListJobSchedulesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + displayName: p["displayName"], + url: p["url"], + eTag: p["eTag"], + lastModified: + p["lastModified"] !== undefined + ? new Date(p["lastModified"]) + : undefined, + creationTime: + p["creationTime"] !== undefined + ? new Date(p["creationTime"]) + : undefined, + state: p["state"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + previousState: p["previousState"], + previousStateTransitionTime: + p["previousStateTransitionTime"] !== undefined + ? new Date(p["previousStateTransitionTime"]) + : undefined, + schedule: { + doNotRunUntil: + p.schedule["doNotRunUntil"] !== undefined + ? new Date(p.schedule["doNotRunUntil"]) + : undefined, + doNotRunAfter: + p.schedule["doNotRunAfter"] !== undefined + ? new Date(p.schedule["doNotRunAfter"]) + : undefined, + startWindow: p.schedule["startWindow"], + recurrenceInterval: p.schedule["recurrenceInterval"], + }, + jobSpecification: { + priority: p.jobSpecification["priority"], + allowTaskPreemption: p.jobSpecification["allowTaskPreemption"], + maxParallelTasks: p.jobSpecification["maxParallelTasks"], + displayName: p.jobSpecification["displayName"], + usesTaskDependencies: p.jobSpecification["usesTaskDependencies"], + onAllTasksComplete: p.jobSpecification["onAllTasksComplete"], + onTaskFailure: p.jobSpecification["onTaskFailure"], + networkConfiguration: !p.jobSpecification.networkConfiguration + ? undefined + : { subnetId: p.jobSpecification.networkConfiguration?.["subnetId"] }, + constraints: !p.jobSpecification.constraints + ? undefined + : { + maxWallClockTime: + p.jobSpecification.constraints?.["maxWallClockTime"], + maxTaskRetryCount: + p.jobSpecification.constraints?.["maxTaskRetryCount"], + }, + jobManagerTask: !p.jobSpecification.jobManagerTask + ? undefined + : { + id: p.jobSpecification.jobManagerTask?.["id"], + displayName: p.jobSpecification.jobManagerTask?.["displayName"], + commandLine: p.jobSpecification.jobManagerTask?.["commandLine"], + containerSettings: !p.jobSpecification.jobManagerTask + ?.containerSettings + ? 
undefined + : { + containerRunOptions: + p.jobSpecification.jobManagerTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + p.jobSpecification.jobManagerTask?.containerSettings?.[ + "imageName" + ], + registry: !p.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ? undefined + : { + username: + p.jobSpecification.jobManagerTask?.containerSettings + ?.registry?.["username"], + password: + p.jobSpecification.jobManagerTask?.containerSettings + ?.registry?.["password"], + registryServer: + p.jobSpecification.jobManagerTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !p.jobSpecification.jobManagerTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobSpecification.jobManagerTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobSpecification.jobManagerTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + p.jobSpecification.jobManagerTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: ( + p.jobSpecification.jobManagerTask?.["outputFiles"] ?? [] + ).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: ( + p.jobSpecification.jobManagerTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !p.jobSpecification.jobManagerTask?.constraints + ? undefined + : { + maxWallClockTime: + p.jobSpecification.jobManagerTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + p.jobSpecification.jobManagerTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + p.jobSpecification.jobManagerTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + requiredSlots: + p.jobSpecification.jobManagerTask?.["requiredSlots"], + killJobOnCompletion: + p.jobSpecification.jobManagerTask?.["killJobOnCompletion"], + userIdentity: !p.jobSpecification.jobManagerTask?.userIdentity + ? undefined + : { + username: + p.jobSpecification.jobManagerTask?.userIdentity?.[ + "username" + ], + autoUser: !p.jobSpecification.jobManagerTask?.userIdentity + ?.autoUser + ? undefined + : { + scope: + p.jobSpecification.jobManagerTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + p.jobSpecification.jobManagerTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + runExclusive: p.jobSpecification.jobManagerTask?.["runExclusive"], + applicationPackageReferences: ( + p.jobSpecification.jobManagerTask?.[ + "applicationPackageReferences" + ] ?? 
[] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !p.jobSpecification.jobManagerTask + ?.authenticationTokenSettings + ? undefined + : { + access: + p.jobSpecification.jobManagerTask + ?.authenticationTokenSettings?.["access"], + }, + allowLowPriorityNode: + p.jobSpecification.jobManagerTask?.["allowLowPriorityNode"], + }, + jobPreparationTask: !p.jobSpecification.jobPreparationTask + ? undefined + : { + id: p.jobSpecification.jobPreparationTask?.["id"], + commandLine: + p.jobSpecification.jobPreparationTask?.["commandLine"], + containerSettings: !p.jobSpecification.jobPreparationTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobSpecification.jobPreparationTask + ?.containerSettings?.["containerRunOptions"], + imageName: + p.jobSpecification.jobPreparationTask + ?.containerSettings?.["imageName"], + registry: !p.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ? undefined + : { + username: + p.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["username"], + password: + p.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["password"], + registryServer: + p.jobSpecification.jobPreparationTask + ?.containerSettings?.registry?.["registryServer"], + identityReference: !p.jobSpecification + .jobPreparationTask?.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + p.jobSpecification.jobPreparationTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobSpecification.jobPreparationTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + p.jobSpecification.jobPreparationTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + p.jobSpecification.jobPreparationTask?.[ + "environmentSettings" + ] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + constraints: !p.jobSpecification.jobPreparationTask?.constraints + ? undefined + : { + maxWallClockTime: + p.jobSpecification.jobPreparationTask?.constraints?.[ + "maxWallClockTime" + ], + retentionTime: + p.jobSpecification.jobPreparationTask?.constraints?.[ + "retentionTime" + ], + maxTaskRetryCount: + p.jobSpecification.jobPreparationTask?.constraints?.[ + "maxTaskRetryCount" + ], + }, + waitForSuccess: + p.jobSpecification.jobPreparationTask?.["waitForSuccess"], + userIdentity: !p.jobSpecification.jobPreparationTask?.userIdentity + ? undefined + : { + username: + p.jobSpecification.jobPreparationTask?.userIdentity?.[ + "username" + ], + autoUser: !p.jobSpecification.jobPreparationTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobSpecification.jobPreparationTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + p.jobSpecification.jobPreparationTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + rerunOnNodeRebootAfterSuccess: + p.jobSpecification.jobPreparationTask?.[ + "rerunOnNodeRebootAfterSuccess" + ], + }, + jobReleaseTask: !p.jobSpecification.jobReleaseTask + ? 
undefined + : { + id: p.jobSpecification.jobReleaseTask?.["id"], + commandLine: p.jobSpecification.jobReleaseTask?.["commandLine"], + containerSettings: !p.jobSpecification.jobReleaseTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobSpecification.jobReleaseTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + p.jobSpecification.jobReleaseTask?.containerSettings?.[ + "imageName" + ], + registry: !p.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ? undefined + : { + username: + p.jobSpecification.jobReleaseTask?.containerSettings + ?.registry?.["username"], + password: + p.jobSpecification.jobReleaseTask?.containerSettings + ?.registry?.["password"], + registryServer: + p.jobSpecification.jobReleaseTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !p.jobSpecification.jobReleaseTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobSpecification.jobReleaseTask + ?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.jobSpecification.jobReleaseTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: ( + p.jobSpecification.jobReleaseTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + p.jobSpecification.jobReleaseTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + maxWallClockTime: + p.jobSpecification.jobReleaseTask?.["maxWallClockTime"], + retentionTime: + p.jobSpecification.jobReleaseTask?.["retentionTime"], + userIdentity: !p.jobSpecification.jobReleaseTask?.userIdentity + ? undefined + : { + username: + p.jobSpecification.jobReleaseTask?.userIdentity?.[ + "username" + ], + autoUser: !p.jobSpecification.jobReleaseTask?.userIdentity + ?.autoUser + ? undefined + : { + scope: + p.jobSpecification.jobReleaseTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + p.jobSpecification.jobReleaseTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + }, + commonEnvironmentSettings: ( + p.jobSpecification["commonEnvironmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + poolInfo: { + poolId: p.jobSpecification.poolInfo["poolId"], + autoPoolSpecification: !p.jobSpecification.poolInfo + .autoPoolSpecification + ? undefined + : { + autoPoolIdPrefix: + p.jobSpecification.poolInfo.autoPoolSpecification?.[ + "autoPoolIdPrefix" + ], + poolLifetimeOption: + p.jobSpecification.poolInfo.autoPoolSpecification?.[ + "poolLifetimeOption" + ], + keepAlive: + p.jobSpecification.poolInfo.autoPoolSpecification?.[ + "keepAlive" + ], + pool: !p.jobSpecification.poolInfo.autoPoolSpecification?.pool + ? undefined + : { + displayName: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["displayName"], + vmSize: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["vmSize"], + cloudServiceConfiguration: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.cloudServiceConfiguration + ? 
undefined + : { + osFamily: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.cloudServiceConfiguration?.["osFamily"], + osVersion: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.cloudServiceConfiguration?.[ + "osVersion" + ], + }, + virtualMachineConfiguration: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ? undefined + : { + imageReference: { + publisher: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "publisher" + ], + offer: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "offer" + ], + sku: p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "sku" + ], + version: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "version" + ], + virtualMachineImageId: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "virtualMachineImageId" + ], + exactVersion: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.imageReference[ + "exactVersion" + ], + }, + nodeAgentSKUId: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration?.[ + "nodeAgentSKUId" + ], + windowsConfiguration: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration + ? undefined + : { + enableAutomaticUpdates: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.windowsConfiguration?.[ + "enableAutomaticUpdates" + ], + }, + dataDisks: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration?.[ + "dataDisks" + ] ?? [] + ).map((p) => ({ + lun: p["lun"], + caching: p["caching"], + diskSizeGB: p["diskSizeGB"], + storageAccountType: p["storageAccountType"], + })), + licenseType: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration?.[ + "licenseType" + ], + containerConfiguration: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration + ? undefined + : { + type: p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.["type"], + containerImageNames: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerImageNames" + ], + containerRegistries: ( + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.containerConfiguration?.[ + "containerRegistries" + ] ?? [] + ).map((p) => ({ + username: p["username"], + password: p["password"], + registryServer: p["registryServer"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + }, + diskEncryptionConfiguration: !p.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration + ? 
undefined + : { + targets: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.diskEncryptionConfiguration?.[ + "targets" + ], + }, + nodePlacementConfiguration: !p.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration + ? undefined + : { + policy: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.nodePlacementConfiguration?.["policy"], + }, + extensions: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.virtualMachineConfiguration?.[ + "extensions" + ] ?? [] + ).map((p) => ({ + name: p["name"], + publisher: p["publisher"], + type: p["type"], + typeHandlerVersion: p["typeHandlerVersion"], + autoUpgradeMinorVersion: + p["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + p["enableAutomaticUpgrade"], + settings: p["settings"], + protectedSettings: p["protectedSettings"], + provisionAfterExtensions: + p["provisionAfterExtensions"], + })), + osDisk: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ? undefined + : { + ephemeralOSDiskSettings: !p.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.virtualMachineConfiguration?.osDisk + ?.ephemeralOSDiskSettings + ? undefined + : { + placement: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.virtualMachineConfiguration + ?.osDisk?.ephemeralOSDiskSettings?.[ + "placement" + ], + }, + }, + }, + taskSlotsPerNode: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["taskSlotsPerNode"], + taskSchedulingPolicy: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.taskSchedulingPolicy + ? undefined + : { + nodeFillType: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.taskSchedulingPolicy?.["nodeFillType"], + }, + resizeTimeout: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["resizeTimeout"], + targetDedicatedNodes: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetDedicatedNodes"], + targetLowPriorityNodes: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetLowPriorityNodes"], + enableAutoScale: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["enableAutoScale"], + autoScaleFormula: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["autoScaleFormula"], + autoScaleEvaluationInterval: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["autoScaleEvaluationInterval"], + enableInterNodeCommunication: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["enableInterNodeCommunication"], + networkConfiguration: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ? undefined + : { + subnetId: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration?.["subnetId"], + dynamicVNetAssignmentScope: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration?.[ + "dynamicVNetAssignmentScope" + ], + endpointConfiguration: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.networkConfiguration + ?.endpointConfiguration + ? undefined + : { + inboundNATPools: ( + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.endpointConfiguration?.[ + "inboundNATPools" + ] ?? 
[] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + backendPort: p["backendPort"], + frontendPortRangeStart: + p["frontendPortRangeStart"], + frontendPortRangeEnd: + p["frontendPortRangeEnd"], + networkSecurityGroupRules: ( + p["networkSecurityGroupRules"] ?? [] + ).map((p) => ({ + priority: p["priority"], + access: p["access"], + sourceAddressPrefix: + p["sourceAddressPrefix"], + sourcePortRanges: p["sourcePortRanges"], + })), + })), + }, + publicIPAddressConfiguration: !p.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration + ? undefined + : { + provision: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "provision" + ], + ipAddressIds: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.networkConfiguration + ?.publicIPAddressConfiguration?.[ + "ipAddressIds" + ], + }, + enableAcceleratedNetworking: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.networkConfiguration?.[ + "enableAcceleratedNetworking" + ], + }, + startTask: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ? undefined + : { + commandLine: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.startTask?.["commandLine"], + containerSettings: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings + ? undefined + : { + containerRunOptions: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.["imageName"], + registry: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.registry + ? undefined + : { + username: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["username"], + password: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["password"], + registryServer: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.["registryServer"], + identityReference: !p.jobSpecification + .poolInfo.autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.containerSettings + ?.registry + ?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.containerSettings?.["workingDirectory"], + }, + resourceFiles: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.startTask?.["resourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: + p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { + resourceId: + p.identityReference?.["resourceId"], + }, + })), + environmentSettings: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ + name: p["name"], + value: p["value"], + })), + userIdentity: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity + ? 
undefined + : { + username: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.["username"], + autoUser: !p.jobSpecification.poolInfo + .autoPoolSpecification?.pool?.startTask + ?.userIdentity?.autoUser + ? undefined + : { + scope: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["scope"], + elevationLevel: + p.jobSpecification.poolInfo + .autoPoolSpecification?.pool + ?.startTask?.userIdentity + ?.autoUser?.["elevationLevel"], + }, + }, + maxTaskRetryCount: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.startTask?.["maxTaskRetryCount"], + waitForSuccess: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.startTask?.["waitForSuccess"], + }, + certificateReferences: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["certificateReferences"] ?? [] + ).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + applicationPackageReferences: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + applicationLicenses: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["applicationLicenses"], + userAccounts: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["userAccounts"] ?? [] + ).map((p) => ({ + name: p["name"], + password: p["password"], + elevationLevel: p["elevationLevel"], + linuxUserConfiguration: !p.linuxUserConfiguration + ? undefined + : { + uid: p.linuxUserConfiguration?.["uid"], + gid: p.linuxUserConfiguration?.["gid"], + sshPrivateKey: + p.linuxUserConfiguration?.["sshPrivateKey"], + }, + windowsUserConfiguration: !p.windowsUserConfiguration + ? undefined + : { + loginMode: + p.windowsUserConfiguration?.["loginMode"], + }, + })), + metadata: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["metadata"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + mountConfiguration: ( + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["mountConfiguration"] ?? [] + ).map((p) => ({ + azureBlobFileSystemConfiguration: + !p.azureBlobFileSystemConfiguration + ? undefined + : { + accountName: + p.azureBlobFileSystemConfiguration?.[ + "accountName" + ], + containerName: + p.azureBlobFileSystemConfiguration?.[ + "containerName" + ], + accountKey: + p.azureBlobFileSystemConfiguration?.[ + "accountKey" + ], + sasKey: + p.azureBlobFileSystemConfiguration?.[ + "sasKey" + ], + blobfuseOptions: + p.azureBlobFileSystemConfiguration?.[ + "blobfuseOptions" + ], + relativeMountPath: + p.azureBlobFileSystemConfiguration?.[ + "relativeMountPath" + ], + identityReference: !p + .azureBlobFileSystemConfiguration + ?.identityReference + ? undefined + : { + resourceId: + p.azureBlobFileSystemConfiguration + ?.identityReference?.["resourceId"], + }, + }, + nfsMountConfiguration: !p.nfsMountConfiguration + ? undefined + : { + source: p.nfsMountConfiguration?.["source"], + relativeMountPath: + p.nfsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.nfsMountConfiguration?.["mountOptions"], + }, + cifsMountConfiguration: !p.cifsMountConfiguration + ? 
undefined + : { + username: p.cifsMountConfiguration?.["username"], + source: p.cifsMountConfiguration?.["source"], + relativeMountPath: + p.cifsMountConfiguration?.["relativeMountPath"], + mountOptions: + p.cifsMountConfiguration?.["mountOptions"], + password: p.cifsMountConfiguration?.["password"], + }, + azureFileShareConfiguration: + !p.azureFileShareConfiguration + ? undefined + : { + accountName: + p.azureFileShareConfiguration?.[ + "accountName" + ], + azureFileUrl: + p.azureFileShareConfiguration?.[ + "azureFileUrl" + ], + accountKey: + p.azureFileShareConfiguration?.["accountKey"], + relativeMountPath: + p.azureFileShareConfiguration?.[ + "relativeMountPath" + ], + mountOptions: + p.azureFileShareConfiguration?.[ + "mountOptions" + ], + }, + })), + targetNodeCommunicationMode: + p.jobSpecification.poolInfo.autoPoolSpecification + ?.pool?.["targetNodeCommunicationMode"], + }, + }, + }, + metadata: (p.jobSpecification["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + executionInfo: !p.executionInfo + ? undefined + : { + nextRunTime: + p.executionInfo?.["nextRunTime"] !== undefined + ? new Date(p.executionInfo?.["nextRunTime"]) + : undefined, + recentJob: !p.executionInfo?.recentJob + ? undefined + : { + id: p.executionInfo?.recentJob?.["id"], + url: p.executionInfo?.recentJob?.["url"], + }, + endTime: + p.executionInfo?.["endTime"] !== undefined + ? new Date(p.executionInfo?.["endTime"]) + : undefined, + }, + metadata: (p["metadata"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + stats: !p.stats + ? undefined + : { + url: p.stats?.["url"], + startTime: new Date(p.stats?.["startTime"]), + lastUpdateTime: new Date(p.stats?.["lastUpdateTime"]), + userCPUTime: p.stats?.["userCPUTime"], + kernelCPUTime: p.stats?.["kernelCPUTime"], + wallClockTime: p.stats?.["wallClockTime"], + readIOps: p.stats?.["readIOps"], + writeIOps: p.stats?.["writeIOps"], + readIOGiB: p.stats?.["readIOGiB"], + writeIOGiB: p.stats?.["writeIOGiB"], + numSucceededTasks: p.stats?.["numSucceededTasks"], + numFailedTasks: p.stats?.["numFailedTasks"], + numTaskRetries: p.stats?.["numTaskRetries"], + waitTime: p.stats?.["waitTime"], + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists all of the Job Schedules in the specified Account. */ +export async function listJobSchedules( + context: Client, + options: ListJobSchedulesOptions = { requestOptions: {} } +): Promise { + const result = await _listJobSchedulesSend(context, options); + return _listJobSchedulesDeserialize(result); +} + +export function _createTaskSend( + context: Client, + jobId: string, + body: BatchTaskCreateOptions, + options: CreateTaskOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks", jobId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + id: body["id"], + displayName: body["displayName"], + exitConditions: !body.exitConditions + ? undefined + : { + exitCodes: (body.exitConditions?.["exitCodes"] ?? []).map( + (p) => ({ + code: p["code"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + }) + ), + exitCodeRanges: ( + body.exitConditions?.["exitCodeRanges"] ?? 
[] + ).map((p) => ({ + start: p["start"], + end: p["end"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + })), + preProcessingError: !body.exitConditions?.preProcessingError + ? undefined + : { + jobAction: + body.exitConditions?.preProcessingError?.["jobAction"], + dependencyAction: + body.exitConditions?.preProcessingError?.[ + "dependencyAction" + ], + }, + fileUploadError: !body.exitConditions?.fileUploadError + ? undefined + : { + jobAction: + body.exitConditions?.fileUploadError?.["jobAction"], + dependencyAction: + body.exitConditions?.fileUploadError?.[ + "dependencyAction" + ], + }, + default: !body.exitConditions?.default + ? undefined + : { + jobAction: body.exitConditions?.default?.["jobAction"], + dependencyAction: + body.exitConditions?.default?.["dependencyAction"], + }, + }, + commandLine: body["commandLine"], + containerSettings: !body.containerSettings + ? undefined + : { + containerRunOptions: + body.containerSettings?.["containerRunOptions"], + imageName: body.containerSettings?.["imageName"], + registry: !body.containerSettings?.registry + ? undefined + : { + username: body.containerSettings?.registry?.["username"], + password: body.containerSettings?.registry?.["password"], + registryServer: + body.containerSettings?.registry?.["registryServer"], + identityReference: !body.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + body.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: body.containerSettings?.["workingDirectory"], + }, + resourceFiles: (body["resourceFiles"] ?? []).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: (body["outputFiles"] ?? []).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: (body["environmentSettings"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + affinityInfo: !body.affinityInfo + ? undefined + : { affinityId: body.affinityInfo?.["affinityId"] }, + constraints: !body.constraints + ? undefined + : { + maxWallClockTime: body.constraints?.["maxWallClockTime"], + retentionTime: body.constraints?.["retentionTime"], + maxTaskRetryCount: body.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: body["requiredSlots"], + userIdentity: !body.userIdentity + ? undefined + : { + username: body.userIdentity?.["username"], + autoUser: !body.userIdentity?.autoUser + ? undefined + : { + scope: body.userIdentity?.autoUser?.["scope"], + elevationLevel: + body.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + multiInstanceSettings: !body.multiInstanceSettings + ? 
undefined + : { + numberOfInstances: + body.multiInstanceSettings?.["numberOfInstances"], + coordinationCommandLine: + body.multiInstanceSettings?.["coordinationCommandLine"], + commonResourceFiles: ( + body.multiInstanceSettings?.["commonResourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + }, + dependsOn: !body.dependsOn + ? undefined + : { + taskIds: body.dependsOn?.["taskIds"], + taskIdRanges: (body.dependsOn?.["taskIdRanges"] ?? []).map( + (p) => ({ start: p["start"], end: p["end"] }) + ), + }, + applicationPackageReferences: ( + body["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !body.authenticationTokenSettings + ? undefined + : { access: body.authenticationTokenSettings?.["access"] }, + }, + }); +} + +export async function _createTaskDeserialize( + result: CreateTask201Response | CreateTaskDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * The maximum lifetime of a Task from addition to completion is 180 days. If a + * Task has not completed within 180 days of being added it will be terminated by + * the Batch service and left in whatever state it was in at that time. + */ +export async function createTask( + context: Client, + jobId: string, + body: BatchTaskCreateOptions, + options: CreateTaskOptions = { requestOptions: {} } +): Promise { + const result = await _createTaskSend(context, jobId, body, options); + return _createTaskDeserialize(result); +} + +export function _listTasksSend( + context: Client, + jobId: string, + options: ListTasksOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks", jobId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _listTasksDeserialize( + result: ListTasks200Response | ListTasksDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + displayName: p["displayName"], + url: p["url"], + eTag: p["eTag"], + lastModified: + p["lastModified"] !== undefined + ? new Date(p["lastModified"]) + : undefined, + creationTime: + p["creationTime"] !== undefined + ? new Date(p["creationTime"]) + : undefined, + exitConditions: !p.exitConditions + ? undefined + : { + exitCodes: (p.exitConditions?.["exitCodes"] ?? []).map((p) => ({ + code: p["code"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + })), + exitCodeRanges: (p.exitConditions?.["exitCodeRanges"] ?? []).map( + (p) => ({ + start: p["start"], + end: p["end"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + }) + ), + preProcessingError: !p.exitConditions?.preProcessingError + ? 
undefined + : { + jobAction: + p.exitConditions?.preProcessingError?.["jobAction"], + dependencyAction: + p.exitConditions?.preProcessingError?.["dependencyAction"], + }, + fileUploadError: !p.exitConditions?.fileUploadError + ? undefined + : { + jobAction: p.exitConditions?.fileUploadError?.["jobAction"], + dependencyAction: + p.exitConditions?.fileUploadError?.["dependencyAction"], + }, + default: !p.exitConditions?.default + ? undefined + : { + jobAction: p.exitConditions?.default?.["jobAction"], + dependencyAction: + p.exitConditions?.default?.["dependencyAction"], + }, + }, + state: p["state"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + previousState: p["previousState"], + previousStateTransitionTime: + p["previousStateTransitionTime"] !== undefined + ? new Date(p["previousStateTransitionTime"]) + : undefined, + commandLine: p["commandLine"], + containerSettings: !p.containerSettings + ? undefined + : { + containerRunOptions: p.containerSettings?.["containerRunOptions"], + imageName: p.containerSettings?.["imageName"], + registry: !p.containerSettings?.registry + ? undefined + : { + username: p.containerSettings?.registry?.["username"], + password: p.containerSettings?.registry?.["password"], + registryServer: + p.containerSettings?.registry?.["registryServer"], + identityReference: !p.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + p.containerSettings?.registry?.identityReference?.[ + "resourceId" + ], + }, + }, + workingDirectory: p.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p["resourceFiles"] ?? []).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: (p["outputFiles"] ?? []).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { uploadCondition: p.uploadOptions["uploadCondition"] }, + })), + environmentSettings: (p["environmentSettings"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + affinityInfo: !p.affinityInfo + ? undefined + : { affinityId: p.affinityInfo?.["affinityId"] }, + constraints: !p.constraints + ? undefined + : { + maxWallClockTime: p.constraints?.["maxWallClockTime"], + retentionTime: p.constraints?.["retentionTime"], + maxTaskRetryCount: p.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: p["requiredSlots"], + userIdentity: !p.userIdentity + ? undefined + : { + username: p.userIdentity?.["username"], + autoUser: !p.userIdentity?.autoUser + ? undefined + : { + scope: p.userIdentity?.autoUser?.["scope"], + elevationLevel: p.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + executionInfo: !p.executionInfo + ? undefined + : { + startTime: + p.executionInfo?.["startTime"] !== undefined + ? 
new Date(p.executionInfo?.["startTime"]) + : undefined, + endTime: + p.executionInfo?.["endTime"] !== undefined + ? new Date(p.executionInfo?.["endTime"]) + : undefined, + exitCode: p.executionInfo?.["exitCode"], + containerInfo: !p.executionInfo?.containerInfo + ? undefined + : { + containerId: p.executionInfo?.containerInfo?.["containerId"], + state: p.executionInfo?.containerInfo?.["state"], + error: p.executionInfo?.containerInfo?.["error"], + }, + failureInfo: !p.executionInfo?.failureInfo + ? undefined + : { + category: p.executionInfo?.failureInfo?.["category"], + code: p.executionInfo?.failureInfo?.["code"], + message: p.executionInfo?.failureInfo?.["message"], + details: ( + p.executionInfo?.failureInfo?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + retryCount: p.executionInfo?.["retryCount"], + lastRetryTime: + p.executionInfo?.["lastRetryTime"] !== undefined + ? new Date(p.executionInfo?.["lastRetryTime"]) + : undefined, + requeueCount: p.executionInfo?.["requeueCount"], + lastRequeueTime: + p.executionInfo?.["lastRequeueTime"] !== undefined + ? new Date(p.executionInfo?.["lastRequeueTime"]) + : undefined, + result: p.executionInfo?.["result"], + }, + nodeInfo: !p.nodeInfo + ? undefined + : { + affinityId: p.nodeInfo?.["affinityId"], + nodeUrl: p.nodeInfo?.["nodeUrl"], + poolId: p.nodeInfo?.["poolId"], + nodeId: p.nodeInfo?.["nodeId"], + taskRootDirectory: p.nodeInfo?.["taskRootDirectory"], + taskRootDirectoryUrl: p.nodeInfo?.["taskRootDirectoryUrl"], + }, + multiInstanceSettings: !p.multiInstanceSettings + ? undefined + : { + numberOfInstances: p.multiInstanceSettings?.["numberOfInstances"], + coordinationCommandLine: + p.multiInstanceSettings?.["coordinationCommandLine"], + commonResourceFiles: ( + p.multiInstanceSettings?.["commonResourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + }, + stats: !p.stats + ? undefined + : { + url: p.stats?.["url"], + startTime: new Date(p.stats?.["startTime"]), + lastUpdateTime: new Date(p.stats?.["lastUpdateTime"]), + userCPUTime: p.stats?.["userCPUTime"], + kernelCPUTime: p.stats?.["kernelCPUTime"], + wallClockTime: p.stats?.["wallClockTime"], + readIOps: p.stats?.["readIOps"], + writeIOps: p.stats?.["writeIOps"], + readIOGiB: p.stats?.["readIOGiB"], + writeIOGiB: p.stats?.["writeIOGiB"], + waitTime: p.stats?.["waitTime"], + }, + dependsOn: !p.dependsOn + ? undefined + : { + taskIds: p.dependsOn?.["taskIds"], + taskIdRanges: (p.dependsOn?.["taskIdRanges"] ?? []).map((p) => ({ + start: p["start"], + end: p["end"], + })), + }, + applicationPackageReferences: ( + p["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !p.authenticationTokenSettings + ? undefined + : { access: p.authenticationTokenSettings?.["access"] }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** + * For multi-instance Tasks, information such as affinityId, executionInfo and + * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + * information about subtasks. 
+ */ +export async function listTasks( + context: Client, + jobId: string, + options: ListTasksOptions = { requestOptions: {} } +): Promise { + const result = await _listTasksSend(context, jobId, options); + return _listTasksDeserialize(result); +} + +export function _createTaskCollectionSend( + context: Client, + jobId: string, + collection: BatchTaskCollection, + options: CreateTaskCollectionOptions = { requestOptions: {} } +): StreamableMethod< + CreateTaskCollection200Response | CreateTaskCollectionDefaultResponse +> { + return context + .path("/jobs/{jobId}/addtaskcollection", jobId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + value: (collection["value"] ?? []).map((p) => ({ + id: p["id"], + displayName: p["displayName"], + exitConditions: !p.exitConditions + ? undefined + : { + exitCodes: (p.exitConditions?.["exitCodes"] ?? []).map((p) => ({ + code: p["code"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + })), + exitCodeRanges: ( + p.exitConditions?.["exitCodeRanges"] ?? [] + ).map((p) => ({ + start: p["start"], + end: p["end"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + })), + preProcessingError: !p.exitConditions?.preProcessingError + ? undefined + : { + jobAction: + p.exitConditions?.preProcessingError?.["jobAction"], + dependencyAction: + p.exitConditions?.preProcessingError?.[ + "dependencyAction" + ], + }, + fileUploadError: !p.exitConditions?.fileUploadError + ? undefined + : { + jobAction: + p.exitConditions?.fileUploadError?.["jobAction"], + dependencyAction: + p.exitConditions?.fileUploadError?.["dependencyAction"], + }, + default: !p.exitConditions?.default + ? undefined + : { + jobAction: p.exitConditions?.default?.["jobAction"], + dependencyAction: + p.exitConditions?.default?.["dependencyAction"], + }, + }, + commandLine: p["commandLine"], + containerSettings: !p.containerSettings + ? undefined + : { + containerRunOptions: + p.containerSettings?.["containerRunOptions"], + imageName: p.containerSettings?.["imageName"], + registry: !p.containerSettings?.registry + ? undefined + : { + username: p.containerSettings?.registry?.["username"], + password: p.containerSettings?.registry?.["password"], + registryServer: + p.containerSettings?.registry?.["registryServer"], + identityReference: !p.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + p.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: p.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p["resourceFiles"] ?? []).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: (p["outputFiles"] ?? []).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container + ?.identityReference + ? 
undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { + uploadCondition: p.uploadOptions["uploadCondition"], + }, + })), + environmentSettings: (p["environmentSettings"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + affinityInfo: !p.affinityInfo + ? undefined + : { affinityId: p.affinityInfo?.["affinityId"] }, + constraints: !p.constraints + ? undefined + : { + maxWallClockTime: p.constraints?.["maxWallClockTime"], + retentionTime: p.constraints?.["retentionTime"], + maxTaskRetryCount: p.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: p["requiredSlots"], + userIdentity: !p.userIdentity + ? undefined + : { + username: p.userIdentity?.["username"], + autoUser: !p.userIdentity?.autoUser + ? undefined + : { + scope: p.userIdentity?.autoUser?.["scope"], + elevationLevel: + p.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + multiInstanceSettings: !p.multiInstanceSettings + ? undefined + : { + numberOfInstances: + p.multiInstanceSettings?.["numberOfInstances"], + coordinationCommandLine: + p.multiInstanceSettings?.["coordinationCommandLine"], + commonResourceFiles: ( + p.multiInstanceSettings?.["commonResourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + }, + dependsOn: !p.dependsOn + ? undefined + : { + taskIds: p.dependsOn?.["taskIds"], + taskIdRanges: (p.dependsOn?.["taskIdRanges"] ?? []).map( + (p) => ({ start: p["start"], end: p["end"] }) + ), + }, + applicationPackageReferences: ( + p["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !p.authenticationTokenSettings + ? undefined + : { access: p.authenticationTokenSettings?.["access"] }, + })), + }, + }); +} + +export async function _createTaskCollectionDeserialize( + result: CreateTaskCollection200Response | CreateTaskCollectionDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + status: p["status"], + taskId: p["taskId"], + eTag: p["eTag"], + lastModified: + p["lastModified"] !== undefined + ? new Date(p["lastModified"]) + : undefined, + location: p["location"], + error: !p.error + ? undefined + : { + code: p.error?.["code"], + message: !p.error?.message + ? undefined + : { + lang: p.error?.message?.["lang"], + value: p.error?.message?.["value"], + }, + values: (p.error?.["values"] ?? []).map((p) => ({ + key: p["key"], + value: p["value"], + })), + }, + })), + }; +} + +/** + * Note that each Task must have a unique ID. The Batch service may not return the + * results for each Task in the same order the Tasks were submitted in this + * request. If the server times out or the connection is closed during the + * request, the request may have been partially or fully processed, or not at all. + * In such cases, the user should re-issue the request. Note that it is up to the + * user to correctly handle failures when re-issuing a request. 
For example, you + * should use the same Task IDs during a retry so that if the prior operation + * succeeded, the retry will not create extra Tasks unexpectedly. If the response + * contains any Tasks which failed to add, a client can retry the request. In a + * retry, it is most efficient to resubmit only Tasks that failed to add, and to + * omit Tasks that were successfully added on the first attempt. The maximum + * lifetime of a Task from addition to completion is 180 days. If a Task has not + * completed within 180 days of being added it will be terminated by the Batch + * service and left in whatever state it was in at that time. + */ +export async function createTaskCollection( + context: Client, + jobId: string, + collection: BatchTaskCollection, + options: CreateTaskCollectionOptions = { requestOptions: {} } +): Promise { + const result = await _createTaskCollectionSend( + context, + jobId, + collection, + options + ); + return _createTaskCollectionDeserialize(result); +} + +export function _deleteTaskSend( + context: Client, + jobId: string, + taskId: string, + options: DeleteTaskOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks/{taskId}", jobId, taskId) + .delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _deleteTaskDeserialize( + result: DeleteTask200Response | DeleteTaskDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * When a Task is deleted, all of the files in its directory on the Compute Node + * where it ran are also deleted (regardless of the retention time). For + * multi-instance Tasks, the delete Task operation applies synchronously to the + * primary task; subtasks and their files are then deleted asynchronously in the + * background. + */ +export async function deleteTask( + context: Client, + jobId: string, + taskId: string, + options: DeleteTaskOptions = { requestOptions: {} } +): Promise { + const result = await _deleteTaskSend(context, jobId, taskId, options); + return _deleteTaskDeserialize(result); +} + +export function _getTaskSend( + context: Client, + jobId: string, + taskId: string, + options: GetTaskOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks/{taskId}", jobId, taskId) + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? 
{ "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { + timeOut: options?.timeOut, + $select: options?.$select, + $expand: options?.$expand, + }, + }); +} + +export async function _getTaskDeserialize( + result: GetTask200Response | GetTaskDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + displayName: result.body["displayName"], + url: result.body["url"], + eTag: result.body["eTag"], + lastModified: + result.body["lastModified"] !== undefined + ? new Date(result.body["lastModified"]) + : undefined, + creationTime: + result.body["creationTime"] !== undefined + ? new Date(result.body["creationTime"]) + : undefined, + exitConditions: !result.body.exitConditions + ? undefined + : { + exitCodes: (result.body.exitConditions?.["exitCodes"] ?? []).map( + (p) => ({ + code: p["code"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + }) + ), + exitCodeRanges: ( + result.body.exitConditions?.["exitCodeRanges"] ?? [] + ).map((p) => ({ + start: p["start"], + end: p["end"], + exitOptions: { + jobAction: p.exitOptions["jobAction"], + dependencyAction: p.exitOptions["dependencyAction"], + }, + })), + preProcessingError: !result.body.exitConditions?.preProcessingError + ? undefined + : { + jobAction: + result.body.exitConditions?.preProcessingError?.["jobAction"], + dependencyAction: + result.body.exitConditions?.preProcessingError?.[ + "dependencyAction" + ], + }, + fileUploadError: !result.body.exitConditions?.fileUploadError + ? undefined + : { + jobAction: + result.body.exitConditions?.fileUploadError?.["jobAction"], + dependencyAction: + result.body.exitConditions?.fileUploadError?.[ + "dependencyAction" + ], + }, + default: !result.body.exitConditions?.default + ? undefined + : { + jobAction: result.body.exitConditions?.default?.["jobAction"], + dependencyAction: + result.body.exitConditions?.default?.["dependencyAction"], + }, + }, + state: result.body["state"], + stateTransitionTime: + result.body["stateTransitionTime"] !== undefined + ? new Date(result.body["stateTransitionTime"]) + : undefined, + previousState: result.body["previousState"], + previousStateTransitionTime: + result.body["previousStateTransitionTime"] !== undefined + ? new Date(result.body["previousStateTransitionTime"]) + : undefined, + commandLine: result.body["commandLine"], + containerSettings: !result.body.containerSettings + ? undefined + : { + containerRunOptions: + result.body.containerSettings?.["containerRunOptions"], + imageName: result.body.containerSettings?.["imageName"], + registry: !result.body.containerSettings?.registry + ? undefined + : { + username: result.body.containerSettings?.registry?.["username"], + password: result.body.containerSettings?.registry?.["password"], + registryServer: + result.body.containerSettings?.registry?.["registryServer"], + identityReference: !result.body.containerSettings?.registry + ?.identityReference + ? undefined + : { + resourceId: + result.body.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: result.body.containerSettings?.["workingDirectory"], + }, + resourceFiles: (result.body["resourceFiles"] ?? 
[]).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + outputFiles: (result.body["outputFiles"] ?? []).map((p) => ({ + filePattern: p["filePattern"], + destination: { + container: !p.destination.container + ? undefined + : { + path: p.destination.container?.["path"], + containerUrl: p.destination.container?.["containerUrl"], + identityReference: !p.destination.container?.identityReference + ? undefined + : { + resourceId: + p.destination.container?.identityReference?.[ + "resourceId" + ], + }, + uploadHeaders: ( + p.destination.container?.["uploadHeaders"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + }, + uploadOptions: { uploadCondition: p.uploadOptions["uploadCondition"] }, + })), + environmentSettings: (result.body["environmentSettings"] ?? []).map( + (p) => ({ name: p["name"], value: p["value"] }) + ), + affinityInfo: !result.body.affinityInfo + ? undefined + : { affinityId: result.body.affinityInfo?.["affinityId"] }, + constraints: !result.body.constraints + ? undefined + : { + maxWallClockTime: result.body.constraints?.["maxWallClockTime"], + retentionTime: result.body.constraints?.["retentionTime"], + maxTaskRetryCount: result.body.constraints?.["maxTaskRetryCount"], + }, + requiredSlots: result.body["requiredSlots"], + userIdentity: !result.body.userIdentity + ? undefined + : { + username: result.body.userIdentity?.["username"], + autoUser: !result.body.userIdentity?.autoUser + ? undefined + : { + scope: result.body.userIdentity?.autoUser?.["scope"], + elevationLevel: + result.body.userIdentity?.autoUser?.["elevationLevel"], + }, + }, + executionInfo: !result.body.executionInfo + ? undefined + : { + startTime: + result.body.executionInfo?.["startTime"] !== undefined + ? new Date(result.body.executionInfo?.["startTime"]) + : undefined, + endTime: + result.body.executionInfo?.["endTime"] !== undefined + ? new Date(result.body.executionInfo?.["endTime"]) + : undefined, + exitCode: result.body.executionInfo?.["exitCode"], + containerInfo: !result.body.executionInfo?.containerInfo + ? undefined + : { + containerId: + result.body.executionInfo?.containerInfo?.["containerId"], + state: result.body.executionInfo?.containerInfo?.["state"], + error: result.body.executionInfo?.containerInfo?.["error"], + }, + failureInfo: !result.body.executionInfo?.failureInfo + ? undefined + : { + category: result.body.executionInfo?.failureInfo?.["category"], + code: result.body.executionInfo?.failureInfo?.["code"], + message: result.body.executionInfo?.failureInfo?.["message"], + details: ( + result.body.executionInfo?.failureInfo?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + retryCount: result.body.executionInfo?.["retryCount"], + lastRetryTime: + result.body.executionInfo?.["lastRetryTime"] !== undefined + ? new Date(result.body.executionInfo?.["lastRetryTime"]) + : undefined, + requeueCount: result.body.executionInfo?.["requeueCount"], + lastRequeueTime: + result.body.executionInfo?.["lastRequeueTime"] !== undefined + ? new Date(result.body.executionInfo?.["lastRequeueTime"]) + : undefined, + result: result.body.executionInfo?.["result"], + }, + nodeInfo: !result.body.nodeInfo + ? 
undefined + : { + affinityId: result.body.nodeInfo?.["affinityId"], + nodeUrl: result.body.nodeInfo?.["nodeUrl"], + poolId: result.body.nodeInfo?.["poolId"], + nodeId: result.body.nodeInfo?.["nodeId"], + taskRootDirectory: result.body.nodeInfo?.["taskRootDirectory"], + taskRootDirectoryUrl: result.body.nodeInfo?.["taskRootDirectoryUrl"], + }, + multiInstanceSettings: !result.body.multiInstanceSettings + ? undefined + : { + numberOfInstances: + result.body.multiInstanceSettings?.["numberOfInstances"], + coordinationCommandLine: + result.body.multiInstanceSettings?.["coordinationCommandLine"], + commonResourceFiles: ( + result.body.multiInstanceSettings?.["commonResourceFiles"] ?? [] + ).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + }, + stats: !result.body.stats + ? undefined + : { + url: result.body.stats?.["url"], + startTime: new Date(result.body.stats?.["startTime"]), + lastUpdateTime: new Date(result.body.stats?.["lastUpdateTime"]), + userCPUTime: result.body.stats?.["userCPUTime"], + kernelCPUTime: result.body.stats?.["kernelCPUTime"], + wallClockTime: result.body.stats?.["wallClockTime"], + readIOps: result.body.stats?.["readIOps"], + writeIOps: result.body.stats?.["writeIOps"], + readIOGiB: result.body.stats?.["readIOGiB"], + writeIOGiB: result.body.stats?.["writeIOGiB"], + waitTime: result.body.stats?.["waitTime"], + }, + dependsOn: !result.body.dependsOn + ? undefined + : { + taskIds: result.body.dependsOn?.["taskIds"], + taskIdRanges: (result.body.dependsOn?.["taskIdRanges"] ?? []).map( + (p) => ({ start: p["start"], end: p["end"] }) + ), + }, + applicationPackageReferences: ( + result.body["applicationPackageReferences"] ?? [] + ).map((p) => ({ + applicationId: p["applicationId"], + version: p["version"], + })), + authenticationTokenSettings: !result.body.authenticationTokenSettings + ? undefined + : { access: result.body.authenticationTokenSettings?.["access"] }, + }; +} + +/** + * For multi-instance Tasks, information such as affinityId, executionInfo and + * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + * information about subtasks. + */ +export async function getTask( + context: Client, + jobId: string, + taskId: string, + options: GetTaskOptions = { requestOptions: {} } +): Promise { + const result = await _getTaskSend(context, jobId, taskId, options); + return _getTaskDeserialize(result); +} + +export function _replaceTaskSend( + context: Client, + jobId: string, + taskId: string, + body: BatchTask, + options: ReplaceTaskOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks/{taskId}", jobId, taskId) + .put({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? 
{ "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + body: { + constraints: !body.constraints + ? undefined + : { + maxWallClockTime: body.constraints?.["maxWallClockTime"], + retentionTime: body.constraints?.["retentionTime"], + maxTaskRetryCount: body.constraints?.["maxTaskRetryCount"], + }, + }, + }); +} + +export async function _replaceTaskDeserialize( + result: ReplaceTask200Response | ReplaceTaskDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Updates the properties of the specified Task. */ +export async function replaceTask( + context: Client, + jobId: string, + taskId: string, + body: BatchTask, + options: ReplaceTaskOptions = { requestOptions: {} } +): Promise { + const result = await _replaceTaskSend(context, jobId, taskId, body, options); + return _replaceTaskDeserialize(result); +} + +export function _listSubTasksSend( + context: Client, + jobId: string, + taskId: string, + options: ListSubTasksOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks/{taskId}/subtasksinfo", jobId, taskId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut, $select: options?.$select }, + }); +} + +export async function _listSubTasksDeserialize( + result: ListSubTasks200Response | ListSubTasksDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + nodeInfo: !p.nodeInfo + ? undefined + : { + affinityId: p.nodeInfo?.["affinityId"], + nodeUrl: p.nodeInfo?.["nodeUrl"], + poolId: p.nodeInfo?.["poolId"], + nodeId: p.nodeInfo?.["nodeId"], + taskRootDirectory: p.nodeInfo?.["taskRootDirectory"], + taskRootDirectoryUrl: p.nodeInfo?.["taskRootDirectoryUrl"], + }, + startTime: + p["startTime"] !== undefined ? new Date(p["startTime"]) : undefined, + endTime: p["endTime"] !== undefined ? new Date(p["endTime"]) : undefined, + exitCode: p["exitCode"], + containerInfo: !p.containerInfo + ? undefined + : { + containerId: p.containerInfo?.["containerId"], + state: p.containerInfo?.["state"], + error: p.containerInfo?.["error"], + }, + failureInfo: !p.failureInfo + ? undefined + : { + category: p.failureInfo?.["category"], + code: p.failureInfo?.["code"], + message: p.failureInfo?.["message"], + details: (p.failureInfo?.["details"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + }, + state: p["state"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + previousState: p["previousState"], + previousStateTransitionTime: + p["previousStateTransitionTime"] !== undefined + ? new Date(p["previousStateTransitionTime"]) + : undefined, + result: p["result"], + })), + }; +} + +/** If the Task is not a multi-instance Task then this returns an empty collection. 
*/ +export async function listSubTasks( + context: Client, + jobId: string, + taskId: string, + options: ListSubTasksOptions = { requestOptions: {} } +): Promise { + const result = await _listSubTasksSend(context, jobId, taskId, options); + return _listSubTasksDeserialize(result); +} + +export function _terminateTaskSend( + context: Client, + jobId: string, + taskId: string, + options: TerminateTaskOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks/{taskId}/terminate", jobId, taskId) + .post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _terminateTaskDeserialize( + result: TerminateTask204Response | TerminateTaskDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * When the Task has been terminated, it moves to the completed state. For + * multi-instance Tasks, the terminate Task operation applies synchronously to the + * primary task; subtasks are then terminated asynchronously in the background. + */ +export async function terminateTask( + context: Client, + jobId: string, + taskId: string, + options: TerminateTaskOptions = { requestOptions: {} } +): Promise { + const result = await _terminateTaskSend(context, jobId, taskId, options); + return _terminateTaskDeserialize(result); +} + +export function _reactivateTaskSend( + context: Client, + jobId: string, + taskId: string, + options: ReactivateTaskOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks/{taskId}/reactivate", jobId, taskId) + .post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined + ? { "if-match": options?.ifMatch } + : {}), + ...(options?.ifNoneMatch !== undefined + ? { "if-none-match": options?.ifNoneMatch } + : {}), + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _reactivateTaskDeserialize( + result: ReactivateTask204Response | ReactivateTaskDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * Reactivation makes a Task eligible to be retried again up to its maximum retry + * count. The Task's state is changed to active. As the Task is no longer in the + * completed state, any previous exit code or failure information is no longer + * available after reactivation. Each time a Task is reactivated, its retry count + * is reset to 0. Reactivation will fail for Tasks that are not completed or that + * previously completed successfully (with an exit code of 0). Additionally, it + * will fail if the Job has completed (or is terminating or deleting). 
+ */ +export async function reactivateTask( + context: Client, + jobId: string, + taskId: string, + options: ReactivateTaskOptions = { requestOptions: {} } +): Promise { + const result = await _reactivateTaskSend(context, jobId, taskId, options); + return _reactivateTaskDeserialize(result); +} + +export function _deleteTaskFileSend( + context: Client, + jobId: string, + taskId: string, + filePath: string, + options: DeleteTaskFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path( + "/jobs/{jobId}/tasks/{taskId}/files/{filePath}", + jobId, + taskId, + filePath + ) + .delete({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + timeOut: options?.timeOut, + recursive: options?.recursive, + }, + }); +} + +export async function _deleteTaskFileDeserialize( + result: DeleteTaskFile200Response | DeleteTaskFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Deletes the specified Task file from the Compute Node where the Task ran. */ +export async function deleteTaskFile( + context: Client, + jobId: string, + taskId: string, + filePath: string, + options: DeleteTaskFileOptions = { requestOptions: {} } +): Promise { + const result = await _deleteTaskFileSend( + context, + jobId, + taskId, + filePath, + options + ); + return _deleteTaskFileDeserialize(result); +} + +export function _getTaskFileSend( + context: Client, + jobId: string, + taskId: string, + filePath: string, + options: GetTaskFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path( + "/jobs/{jobId}/tasks/{taskId}/files/{filePath}", + jobId, + taskId, + filePath + ) + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + ...(options?.ocpRange !== undefined + ? { "ocp-range": options?.ocpRange } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getTaskFileDeserialize( + result: GetTaskFile200Response | GetTaskFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return result.body; +} + +/** Returns the content of the specified Task file. */ +export async function getTaskFile( + context: Client, + jobId: string, + taskId: string, + filePath: string, + options: GetTaskFileOptions = { requestOptions: {} } +): Promise { + const result = await _getTaskFileSend( + context, + jobId, + taskId, + filePath, + options + ); + return _getTaskFileDeserialize(result); +} + +export function _getTaskFilePropertiesSend( + context: Client, + jobId: string, + taskId: string, + filePath: string, + options: GetTaskFilePropertiesOptions = { requestOptions: {} } +): StreamableMethod< + GetTaskFileProperties200Response | GetTaskFilePropertiesDefaultResponse +> { + return context + .path( + "/jobs/{jobId}/tasks/{taskId}/files/{filePath}", + jobId, + taskId, + filePath + ) + .head({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? 
{ "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getTaskFilePropertiesDeserialize( + result: + | GetTaskFileProperties200Response + | GetTaskFilePropertiesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Gets the properties of the specified Task file. */ +export async function getTaskFileProperties( + context: Client, + jobId: string, + taskId: string, + filePath: string, + options: GetTaskFilePropertiesOptions = { requestOptions: {} } +): Promise { + const result = await _getTaskFilePropertiesSend( + context, + jobId, + taskId, + filePath, + options + ); + return _getTaskFilePropertiesDeserialize(result); +} + +export function _listTaskFilesSend( + context: Client, + jobId: string, + taskId: string, + options: ListTaskFilesOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/jobs/{jobId}/tasks/{taskId}/files", jobId, taskId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + recursive: options?.recursive, + }, + }); +} + +export async function _listTaskFilesDeserialize( + result: ListTaskFiles200Response | ListTaskFilesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + name: p["name"], + url: p["url"], + isDirectory: p["isDirectory"], + properties: !p.properties + ? undefined + : { + creationTime: + p.properties?.["creationTime"] !== undefined + ? new Date(p.properties?.["creationTime"]) + : undefined, + lastModified: new Date(p.properties?.["lastModified"]), + contentLength: p.properties?.["contentLength"], + contentType: p.properties?.["contentType"], + fileMode: p.properties?.["fileMode"], + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists the files in a Task's directory on its Compute Node. */ +export async function listTaskFiles( + context: Client, + jobId: string, + taskId: string, + options: ListTaskFilesOptions = { requestOptions: {} } +): Promise { + const result = await _listTaskFilesSend(context, jobId, taskId, options); + return _listTaskFilesDeserialize(result); +} + +export function _createNodeUserSend( + context: Client, + poolId: string, + nodeId: string, + body: BatchNodeUserCreateOptions, + options: CreateNodeUserOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/nodes/{nodeId}/users", poolId, nodeId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + name: body["name"], + isAdmin: body["isAdmin"], + expiryTime: body["expiryTime"]?.toISOString(), + password: body["password"], + sshPublicKey: body["sshPublicKey"], + }, + }); +} + +export async function _createNodeUserDeserialize( + result: CreateNodeUser201Response | CreateNodeUserDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You can add a user Account to a Compute Node only when it is in the idle or + * running state. 
+ */ +export async function createNodeUser( + context: Client, + poolId: string, + nodeId: string, + body: BatchNodeUserCreateOptions, + options: CreateNodeUserOptions = { requestOptions: {} } +): Promise { + const result = await _createNodeUserSend( + context, + poolId, + nodeId, + body, + options + ); + return _createNodeUserDeserialize(result); +} + +export function _deleteNodeUserSend( + context: Client, + poolId: string, + nodeId: string, + userName: string, + options: DeleteNodeUserOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path( + "/pools/{poolId}/nodes/{nodeId}/users/{userName}", + poolId, + nodeId, + userName + ) + .delete({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _deleteNodeUserDeserialize( + result: DeleteNodeUser200Response | DeleteNodeUserDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You can delete a user Account to a Compute Node only when it is in the idle or + * running state. + */ +export async function deleteNodeUser( + context: Client, + poolId: string, + nodeId: string, + userName: string, + options: DeleteNodeUserOptions = { requestOptions: {} } +): Promise { + const result = await _deleteNodeUserSend( + context, + poolId, + nodeId, + userName, + options + ); + return _deleteNodeUserDeserialize(result); +} + +export function _replaceNodeUserSend( + context: Client, + poolId: string, + nodeId: string, + userName: string, + body: BatchNodeUserUpdateOptions, + options: ReplaceNodeUserOptions = { requestOptions: {} } +): StreamableMethod< + ReplaceNodeUser200Response | ReplaceNodeUserDefaultResponse +> { + return context + .path( + "/pools/{poolId}/nodes/{nodeId}/users/{userName}", + poolId, + nodeId, + userName + ) + .put({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + password: body["password"], + expiryTime: body["expiryTime"]?.toISOString(), + sshPublicKey: body["sshPublicKey"], + }, + }); +} + +export async function _replaceNodeUserDeserialize( + result: ReplaceNodeUser200Response | ReplaceNodeUserDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * This operation replaces of all the updatable properties of the Account. For + * example, if the expiryTime element is not specified, the current value is + * replaced with the default value, not left unmodified. You can update a user + * Account on a Compute Node only when it is in the idle or running state. 
+ */ +export async function replaceNodeUser( + context: Client, + poolId: string, + nodeId: string, + userName: string, + body: BatchNodeUserUpdateOptions, + options: ReplaceNodeUserOptions = { requestOptions: {} } +): Promise { + const result = await _replaceNodeUserSend( + context, + poolId, + nodeId, + userName, + body, + options + ); + return _replaceNodeUserDeserialize(result); +} + +export function _getNodeSend( + context: Client, + poolId: string, + nodeId: string, + options: GetNodeOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/nodes/{nodeId}", poolId, nodeId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut, $select: options?.$select }, + }); +} + +export async function _getNodeDeserialize( + result: GetNode200Response | GetNodeDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + url: result.body["url"], + state: result.body["state"], + schedulingState: result.body["schedulingState"], + stateTransitionTime: + result.body["stateTransitionTime"] !== undefined + ? new Date(result.body["stateTransitionTime"]) + : undefined, + lastBootTime: + result.body["lastBootTime"] !== undefined + ? new Date(result.body["lastBootTime"]) + : undefined, + allocationTime: + result.body["allocationTime"] !== undefined + ? new Date(result.body["allocationTime"]) + : undefined, + ipAddress: result.body["ipAddress"], + affinityId: result.body["affinityId"], + vmSize: result.body["vmSize"], + totalTasksRun: result.body["totalTasksRun"], + runningTasksCount: result.body["runningTasksCount"], + runningTaskSlotsCount: result.body["runningTaskSlotsCount"], + totalTasksSucceeded: result.body["totalTasksSucceeded"], + recentTasks: (result.body["recentTasks"] ?? []).map((p) => ({ + taskUrl: p["taskUrl"], + jobId: p["jobId"], + taskId: p["taskId"], + subtaskId: p["subtaskId"], + taskState: p["taskState"], + executionInfo: !p.executionInfo + ? undefined + : { + startTime: + p.executionInfo?.["startTime"] !== undefined + ? new Date(p.executionInfo?.["startTime"]) + : undefined, + endTime: + p.executionInfo?.["endTime"] !== undefined + ? new Date(p.executionInfo?.["endTime"]) + : undefined, + exitCode: p.executionInfo?.["exitCode"], + containerInfo: !p.executionInfo?.containerInfo + ? undefined + : { + containerId: p.executionInfo?.containerInfo?.["containerId"], + state: p.executionInfo?.containerInfo?.["state"], + error: p.executionInfo?.containerInfo?.["error"], + }, + failureInfo: !p.executionInfo?.failureInfo + ? undefined + : { + category: p.executionInfo?.failureInfo?.["category"], + code: p.executionInfo?.failureInfo?.["code"], + message: p.executionInfo?.failureInfo?.["message"], + details: ( + p.executionInfo?.failureInfo?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + retryCount: p.executionInfo?.["retryCount"], + lastRetryTime: + p.executionInfo?.["lastRetryTime"] !== undefined + ? new Date(p.executionInfo?.["lastRetryTime"]) + : undefined, + requeueCount: p.executionInfo?.["requeueCount"], + lastRequeueTime: + p.executionInfo?.["lastRequeueTime"] !== undefined + ? new Date(p.executionInfo?.["lastRequeueTime"]) + : undefined, + result: p.executionInfo?.["result"], + }, + })), + startTask: !result.body.startTask + ? undefined + : { + commandLine: result.body.startTask?.["commandLine"], + containerSettings: !result.body.startTask?.containerSettings + ? 
undefined + : { + containerRunOptions: + result.body.startTask?.containerSettings?.[ + "containerRunOptions" + ], + imageName: + result.body.startTask?.containerSettings?.["imageName"], + registry: !result.body.startTask?.containerSettings?.registry + ? undefined + : { + username: + result.body.startTask?.containerSettings?.registry?.[ + "username" + ], + password: + result.body.startTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + result.body.startTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !result.body.startTask + ?.containerSettings?.registry?.identityReference + ? undefined + : { + resourceId: + result.body.startTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + result.body.startTask?.containerSettings?.[ + "workingDirectory" + ], + }, + resourceFiles: (result.body.startTask?.["resourceFiles"] ?? []).map( + (p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + }) + ), + environmentSettings: ( + result.body.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !result.body.startTask?.userIdentity + ? undefined + : { + username: result.body.startTask?.userIdentity?.["username"], + autoUser: !result.body.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: + result.body.startTask?.userIdentity?.autoUser?.[ + "scope" + ], + elevationLevel: + result.body.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: result.body.startTask?.["maxTaskRetryCount"], + waitForSuccess: result.body.startTask?.["waitForSuccess"], + }, + startTaskInfo: !result.body.startTaskInfo + ? undefined + : { + state: result.body.startTaskInfo?.["state"], + startTime: new Date(result.body.startTaskInfo?.["startTime"]), + endTime: + result.body.startTaskInfo?.["endTime"] !== undefined + ? new Date(result.body.startTaskInfo?.["endTime"]) + : undefined, + exitCode: result.body.startTaskInfo?.["exitCode"], + containerInfo: !result.body.startTaskInfo?.containerInfo + ? undefined + : { + containerId: + result.body.startTaskInfo?.containerInfo?.["containerId"], + state: result.body.startTaskInfo?.containerInfo?.["state"], + error: result.body.startTaskInfo?.containerInfo?.["error"], + }, + failureInfo: !result.body.startTaskInfo?.failureInfo + ? undefined + : { + category: result.body.startTaskInfo?.failureInfo?.["category"], + code: result.body.startTaskInfo?.failureInfo?.["code"], + message: result.body.startTaskInfo?.failureInfo?.["message"], + details: ( + result.body.startTaskInfo?.failureInfo?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + retryCount: result.body.startTaskInfo?.["retryCount"], + lastRetryTime: + result.body.startTaskInfo?.["lastRetryTime"] !== undefined + ? new Date(result.body.startTaskInfo?.["lastRetryTime"]) + : undefined, + result: result.body.startTaskInfo?.["result"], + }, + certificateReferences: (result.body["certificateReferences"] ?? 
[]).map( + (p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + }) + ), + errors: (result.body["errors"] ?? []).map((p) => ({ + code: p["code"], + message: p["message"], + errorDetails: (p["errorDetails"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + })), + isDedicated: result.body["isDedicated"], + endpointConfiguration: !result.body.endpointConfiguration + ? undefined + : { + inboundEndpoints: ( + result.body.endpointConfiguration?.["inboundEndpoints"] ?? [] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + publicIPAddress: p["publicIPAddress"], + publicFQDN: p["publicFQDN"], + frontendPort: p["frontendPort"], + backendPort: p["backendPort"], + })), + }, + nodeAgentInfo: !result.body.nodeAgentInfo + ? undefined + : { + version: result.body.nodeAgentInfo?.["version"], + lastUpdateTime: new Date( + result.body.nodeAgentInfo?.["lastUpdateTime"] + ), + }, + virtualMachineInfo: !result.body.virtualMachineInfo + ? undefined + : { + imageReference: !result.body.virtualMachineInfo?.imageReference + ? undefined + : { + publisher: + result.body.virtualMachineInfo?.imageReference?.["publisher"], + offer: + result.body.virtualMachineInfo?.imageReference?.["offer"], + sku: result.body.virtualMachineInfo?.imageReference?.["sku"], + version: + result.body.virtualMachineInfo?.imageReference?.["version"], + virtualMachineImageId: + result.body.virtualMachineInfo?.imageReference?.[ + "virtualMachineImageId" + ], + exactVersion: + result.body.virtualMachineInfo?.imageReference?.[ + "exactVersion" + ], + }, + }, + }; +} + +/** Gets information about the specified Compute Node. */ +export async function getNode( + context: Client, + poolId: string, + nodeId: string, + options: GetNodeOptions = { requestOptions: {} } +): Promise { + const result = await _getNodeSend(context, poolId, nodeId, options); + return _getNodeDeserialize(result); +} + +export function _rebootNodeSend( + context: Client, + poolId: string, + nodeId: string, + body: NodeRebootOptions, + options: RebootNodeOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/nodes/{nodeId}/reboot", poolId, nodeId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { nodeRebootOption: body["nodeRebootOption"] }, + }); +} + +export async function _rebootNodeDeserialize( + result: RebootNode202Response | RebootNodeDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** You can restart a Compute Node only if it is in an idle or running state. */ +export async function rebootNode( + context: Client, + poolId: string, + nodeId: string, + body: NodeRebootOptions, + options: RebootNodeOptions = { requestOptions: {} } +): Promise { + const result = await _rebootNodeSend(context, poolId, nodeId, body, options); + return _rebootNodeDeserialize(result); +} + +export function _reimageNodeSend( + context: Client, + poolId: string, + nodeId: string, + body: NodeReimageOptions, + options: ReimageNodeOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/nodes/{nodeId}/reimage", poolId, nodeId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? 
+ "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { nodeReimageOption: body["nodeReimageOption"] }, + }); +} + +export async function _reimageNodeDeserialize( + result: ReimageNode202Response | ReimageNodeDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You can reinstall the operating system on a Compute Node only if it is in an + * idle or running state. This API can be invoked only on Pools created with the + * cloud service configuration property. + */ +export async function reimageNode( + context: Client, + poolId: string, + nodeId: string, + body: NodeReimageOptions, + options: ReimageNodeOptions = { requestOptions: {} } +): Promise { + const result = await _reimageNodeSend(context, poolId, nodeId, body, options); + return _reimageNodeDeserialize(result); +} + +export function _disableNodeSchedulingSend( + context: Client, + poolId: string, + nodeId: string, + body: NodeDisableSchedulingOptions, + options: DisableNodeSchedulingOptions = { requestOptions: {} } +): StreamableMethod< + DisableNodeScheduling200Response | DisableNodeSchedulingDefaultResponse +> { + return context + .path("/pools/{poolId}/nodes/{nodeId}/disablescheduling", poolId, nodeId) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + nodeDisableSchedulingOption: body["nodeDisableSchedulingOption"], + }, + }); +} + +export async function _disableNodeSchedulingDeserialize( + result: + | DisableNodeScheduling200Response + | DisableNodeSchedulingDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You can disable Task scheduling on a Compute Node only if its current + * scheduling state is enabled. 
+ */ +export async function disableNodeScheduling( + context: Client, + poolId: string, + nodeId: string, + body: NodeDisableSchedulingOptions, + options: DisableNodeSchedulingOptions = { requestOptions: {} } +): Promise { + const result = await _disableNodeSchedulingSend( + context, + poolId, + nodeId, + body, + options + ); + return _disableNodeSchedulingDeserialize(result); +} + +export function _enableNodeSchedulingSend( + context: Client, + poolId: string, + nodeId: string, + options: EnableNodeSchedulingOptions = { requestOptions: {} } +): StreamableMethod< + EnableNodeScheduling200Response | EnableNodeSchedulingDefaultResponse +> { + return context + .path("/pools/{poolId}/nodes/{nodeId}/enablescheduling", poolId, nodeId) + .post({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _enableNodeSchedulingDeserialize( + result: EnableNodeScheduling200Response | EnableNodeSchedulingDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** + * You can enable Task scheduling on a Compute Node only if its current scheduling + * state is disabled + */ +export async function enableNodeScheduling( + context: Client, + poolId: string, + nodeId: string, + options: EnableNodeSchedulingOptions = { requestOptions: {} } +): Promise { + const result = await _enableNodeSchedulingSend( + context, + poolId, + nodeId, + options + ); + return _enableNodeSchedulingDeserialize(result); +} + +export function _getNodeRemoteLoginSettingsSend( + context: Client, + poolId: string, + nodeId: string, + options: GetNodeRemoteLoginSettingsOptions = { requestOptions: {} } +): StreamableMethod< + | GetNodeRemoteLoginSettings200Response + | GetNodeRemoteLoginSettingsDefaultResponse +> { + return context + .path("/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", poolId, nodeId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getNodeRemoteLoginSettingsDeserialize( + result: + | GetNodeRemoteLoginSettings200Response + | GetNodeRemoteLoginSettingsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + remoteLoginIPAddress: result.body["remoteLoginIPAddress"], + remoteLoginPort: result.body["remoteLoginPort"], + }; +} + +/** + * Before you can remotely login to a Compute Node using the remote login + * settings, you must create a user Account on the Compute Node. This API can be + * invoked only on Pools created with the virtual machine configuration property. + * For Pools created with a cloud service configuration, see the GetRemoteDesktop + * API. 
+ */ +export async function getNodeRemoteLoginSettings( + context: Client, + poolId: string, + nodeId: string, + options: GetNodeRemoteLoginSettingsOptions = { requestOptions: {} } +): Promise { + const result = await _getNodeRemoteLoginSettingsSend( + context, + poolId, + nodeId, + options + ); + return _getNodeRemoteLoginSettingsDeserialize(result); +} + +export function _getNodeRemoteDesktopFileSend( + context: Client, + poolId: string, + nodeId: string, + options: GetNodeRemoteDesktopFileOptions = { requestOptions: {} } +): StreamableMethod< + GetNodeRemoteDesktopFile200Response | GetNodeRemoteDesktopFileDefaultResponse +> { + return context + .path("/pools/{poolId}/nodes/{nodeId}/rdp", poolId, nodeId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getNodeRemoteDesktopFileDeserialize( + result: + | GetNodeRemoteDesktopFile200Response + | GetNodeRemoteDesktopFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return typeof result.body === "string" + ? stringToUint8Array(result.body, "base64") + : result.body; +} + +/** + * Before you can access a Compute Node by using the RDP file, you must create a + * user Account on the Compute Node. This API can only be invoked on Pools created + * with a cloud service configuration. For Pools created with a virtual machine + * configuration, see the GetRemoteLoginSettings API. + */ +export async function getNodeRemoteDesktopFile( + context: Client, + poolId: string, + nodeId: string, + options: GetNodeRemoteDesktopFileOptions = { requestOptions: {} } +): Promise { + const result = await _getNodeRemoteDesktopFileSend( + context, + poolId, + nodeId, + options + ); + return _getNodeRemoteDesktopFileDeserialize(result); +} + +export function _uploadNodeLogsSend( + context: Client, + poolId: string, + nodeId: string, + body: UploadBatchServiceLogsOptions, + options: UploadNodeLogsOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path( + "/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs", + poolId, + nodeId + ) + .post({ + ...operationOptionsToRequestParameters(options), + contentType: + (options.contentType as any) ?? + "application/json; odata=minimalmetadata", + queryParameters: { timeOut: options?.timeOut }, + body: { + containerUrl: body["containerUrl"], + startTime: body["startTime"].toISOString(), + endTime: body["endTime"]?.toISOString(), + identityReference: !body.identityReference + ? undefined + : { resourceId: body.identityReference?.["resourceId"] }, + }, + }); +} + +export async function _uploadNodeLogsDeserialize( + result: UploadNodeLogs200Response | UploadNodeLogsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + virtualDirectoryName: result.body["virtualDirectoryName"], + numberOfFilesUploaded: result.body["numberOfFilesUploaded"], + }; +} + +/** + * This is for gathering Azure Batch service log files in an automated fashion + * from Compute Nodes if you are experiencing an error and wish to escalate to + * Azure support. The Azure Batch service log files should be shared with Azure + * support to aid in debugging issues with the Batch service. 
+ */ +export async function uploadNodeLogs( + context: Client, + poolId: string, + nodeId: string, + body: UploadBatchServiceLogsOptions, + options: UploadNodeLogsOptions = { requestOptions: {} } +): Promise { + const result = await _uploadNodeLogsSend( + context, + poolId, + nodeId, + body, + options + ); + return _uploadNodeLogsDeserialize(result); +} + +export function _listNodesSend( + context: Client, + poolId: string, + options: ListNodesOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/nodes", poolId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + $select: options?.$select, + }, + }); +} + +export async function _listNodesDeserialize( + result: ListNodes200Response | ListNodesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + id: p["id"], + url: p["url"], + state: p["state"], + schedulingState: p["schedulingState"], + stateTransitionTime: + p["stateTransitionTime"] !== undefined + ? new Date(p["stateTransitionTime"]) + : undefined, + lastBootTime: + p["lastBootTime"] !== undefined + ? new Date(p["lastBootTime"]) + : undefined, + allocationTime: + p["allocationTime"] !== undefined + ? new Date(p["allocationTime"]) + : undefined, + ipAddress: p["ipAddress"], + affinityId: p["affinityId"], + vmSize: p["vmSize"], + totalTasksRun: p["totalTasksRun"], + runningTasksCount: p["runningTasksCount"], + runningTaskSlotsCount: p["runningTaskSlotsCount"], + totalTasksSucceeded: p["totalTasksSucceeded"], + recentTasks: (p["recentTasks"] ?? []).map((p) => ({ + taskUrl: p["taskUrl"], + jobId: p["jobId"], + taskId: p["taskId"], + subtaskId: p["subtaskId"], + taskState: p["taskState"], + executionInfo: !p.executionInfo + ? undefined + : { + startTime: + p.executionInfo?.["startTime"] !== undefined + ? new Date(p.executionInfo?.["startTime"]) + : undefined, + endTime: + p.executionInfo?.["endTime"] !== undefined + ? new Date(p.executionInfo?.["endTime"]) + : undefined, + exitCode: p.executionInfo?.["exitCode"], + containerInfo: !p.executionInfo?.containerInfo + ? undefined + : { + containerId: + p.executionInfo?.containerInfo?.["containerId"], + state: p.executionInfo?.containerInfo?.["state"], + error: p.executionInfo?.containerInfo?.["error"], + }, + failureInfo: !p.executionInfo?.failureInfo + ? undefined + : { + category: p.executionInfo?.failureInfo?.["category"], + code: p.executionInfo?.failureInfo?.["code"], + message: p.executionInfo?.failureInfo?.["message"], + details: ( + p.executionInfo?.failureInfo?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + retryCount: p.executionInfo?.["retryCount"], + lastRetryTime: + p.executionInfo?.["lastRetryTime"] !== undefined + ? new Date(p.executionInfo?.["lastRetryTime"]) + : undefined, + requeueCount: p.executionInfo?.["requeueCount"], + lastRequeueTime: + p.executionInfo?.["lastRequeueTime"] !== undefined + ? new Date(p.executionInfo?.["lastRequeueTime"]) + : undefined, + result: p.executionInfo?.["result"], + }, + })), + startTask: !p.startTask + ? undefined + : { + commandLine: p.startTask?.["commandLine"], + containerSettings: !p.startTask?.containerSettings + ? 
undefined + : { + containerRunOptions: + p.startTask?.containerSettings?.["containerRunOptions"], + imageName: p.startTask?.containerSettings?.["imageName"], + registry: !p.startTask?.containerSettings?.registry + ? undefined + : { + username: + p.startTask?.containerSettings?.registry?.[ + "username" + ], + password: + p.startTask?.containerSettings?.registry?.[ + "password" + ], + registryServer: + p.startTask?.containerSettings?.registry?.[ + "registryServer" + ], + identityReference: !p.startTask?.containerSettings + ?.registry?.identityReference + ? undefined + : { + resourceId: + p.startTask?.containerSettings?.registry + ?.identityReference?.["resourceId"], + }, + }, + workingDirectory: + p.startTask?.containerSettings?.["workingDirectory"], + }, + resourceFiles: (p.startTask?.["resourceFiles"] ?? []).map((p) => ({ + autoStorageContainerName: p["autoStorageContainerName"], + storageContainerUrl: p["storageContainerUrl"], + httpUrl: p["httpUrl"], + blobPrefix: p["blobPrefix"], + filePath: p["filePath"], + fileMode: p["fileMode"], + identityReference: !p.identityReference + ? undefined + : { resourceId: p.identityReference?.["resourceId"] }, + })), + environmentSettings: ( + p.startTask?.["environmentSettings"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + userIdentity: !p.startTask?.userIdentity + ? undefined + : { + username: p.startTask?.userIdentity?.["username"], + autoUser: !p.startTask?.userIdentity?.autoUser + ? undefined + : { + scope: p.startTask?.userIdentity?.autoUser?.["scope"], + elevationLevel: + p.startTask?.userIdentity?.autoUser?.[ + "elevationLevel" + ], + }, + }, + maxTaskRetryCount: p.startTask?.["maxTaskRetryCount"], + waitForSuccess: p.startTask?.["waitForSuccess"], + }, + startTaskInfo: !p.startTaskInfo + ? undefined + : { + state: p.startTaskInfo?.["state"], + startTime: new Date(p.startTaskInfo?.["startTime"]), + endTime: + p.startTaskInfo?.["endTime"] !== undefined + ? new Date(p.startTaskInfo?.["endTime"]) + : undefined, + exitCode: p.startTaskInfo?.["exitCode"], + containerInfo: !p.startTaskInfo?.containerInfo + ? undefined + : { + containerId: p.startTaskInfo?.containerInfo?.["containerId"], + state: p.startTaskInfo?.containerInfo?.["state"], + error: p.startTaskInfo?.containerInfo?.["error"], + }, + failureInfo: !p.startTaskInfo?.failureInfo + ? undefined + : { + category: p.startTaskInfo?.failureInfo?.["category"], + code: p.startTaskInfo?.failureInfo?.["code"], + message: p.startTaskInfo?.failureInfo?.["message"], + details: ( + p.startTaskInfo?.failureInfo?.["details"] ?? [] + ).map((p) => ({ name: p["name"], value: p["value"] })), + }, + retryCount: p.startTaskInfo?.["retryCount"], + lastRetryTime: + p.startTaskInfo?.["lastRetryTime"] !== undefined + ? new Date(p.startTaskInfo?.["lastRetryTime"]) + : undefined, + result: p.startTaskInfo?.["result"], + }, + certificateReferences: (p["certificateReferences"] ?? []).map((p) => ({ + thumbprint: p["thumbprint"], + thumbprintAlgorithm: p["thumbprintAlgorithm"], + storeLocation: p["storeLocation"], + storeName: p["storeName"], + visibility: p["visibility"], + })), + errors: (p["errors"] ?? []).map((p) => ({ + code: p["code"], + message: p["message"], + errorDetails: (p["errorDetails"] ?? []).map((p) => ({ + name: p["name"], + value: p["value"], + })), + })), + isDedicated: p["isDedicated"], + endpointConfiguration: !p.endpointConfiguration + ? undefined + : { + inboundEndpoints: ( + p.endpointConfiguration?.["inboundEndpoints"] ?? 
[] + ).map((p) => ({ + name: p["name"], + protocol: p["protocol"], + publicIPAddress: p["publicIPAddress"], + publicFQDN: p["publicFQDN"], + frontendPort: p["frontendPort"], + backendPort: p["backendPort"], + })), + }, + nodeAgentInfo: !p.nodeAgentInfo + ? undefined + : { + version: p.nodeAgentInfo?.["version"], + lastUpdateTime: new Date(p.nodeAgentInfo?.["lastUpdateTime"]), + }, + virtualMachineInfo: !p.virtualMachineInfo + ? undefined + : { + imageReference: !p.virtualMachineInfo?.imageReference + ? undefined + : { + publisher: + p.virtualMachineInfo?.imageReference?.["publisher"], + offer: p.virtualMachineInfo?.imageReference?.["offer"], + sku: p.virtualMachineInfo?.imageReference?.["sku"], + version: p.virtualMachineInfo?.imageReference?.["version"], + virtualMachineImageId: + p.virtualMachineInfo?.imageReference?.[ + "virtualMachineImageId" + ], + exactVersion: + p.virtualMachineInfo?.imageReference?.["exactVersion"], + }, + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists the Compute Nodes in the specified Pool. */ +export async function listNodes( + context: Client, + poolId: string, + options: ListNodesOptions = { requestOptions: {} } +): Promise { + const result = await _listNodesSend(context, poolId, options); + return _listNodesDeserialize(result); +} + +export function _getNodeExtensionSend( + context: Client, + poolId: string, + nodeId: string, + extensionName: string, + options: GetNodeExtensionOptions = { requestOptions: {} } +): StreamableMethod< + GetNodeExtension200Response | GetNodeExtensionDefaultResponse +> { + return context + .path( + "/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}", + poolId, + nodeId, + extensionName + ) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { timeOut: options?.timeOut, $select: options?.$select }, + }); +} + +export async function _getNodeExtensionDeserialize( + result: GetNodeExtension200Response | GetNodeExtensionDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + provisioningState: result.body["provisioningState"], + vmExtension: !result.body.vmExtension + ? undefined + : { + name: result.body.vmExtension?.["name"], + publisher: result.body.vmExtension?.["publisher"], + type: result.body.vmExtension?.["type"], + typeHandlerVersion: result.body.vmExtension?.["typeHandlerVersion"], + autoUpgradeMinorVersion: + result.body.vmExtension?.["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: + result.body.vmExtension?.["enableAutomaticUpgrade"], + settings: result.body.vmExtension?.["settings"], + protectedSettings: result.body.vmExtension?.["protectedSettings"], + provisionAfterExtensions: + result.body.vmExtension?.["provisionAfterExtensions"], + }, + instanceView: !result.body.instanceView + ? undefined + : { + name: result.body.instanceView?.["name"], + statuses: (result.body.instanceView?.["statuses"] ?? []).map((p) => ({ + code: p["code"], + displayStatus: p["displayStatus"], + level: p["level"], + message: p["message"], + time: p["time"], + })), + subStatuses: (result.body.instanceView?.["subStatuses"] ?? []).map( + (p) => ({ + code: p["code"], + displayStatus: p["displayStatus"], + level: p["level"], + message: p["message"], + time: p["time"], + }) + ), + }, + }; +} + +/** Gets information about the specified Compute Node Extension. 
*/ +export async function getNodeExtension( + context: Client, + poolId: string, + nodeId: string, + extensionName: string, + options: GetNodeExtensionOptions = { requestOptions: {} } +): Promise { + const result = await _getNodeExtensionSend( + context, + poolId, + nodeId, + extensionName, + options + ); + return _getNodeExtensionDeserialize(result); +} + +export function _listNodeExtensionsSend( + context: Client, + poolId: string, + nodeId: string, + options: ListNodeExtensionsOptions = { requestOptions: {} } +): StreamableMethod< + ListNodeExtensions200Response | ListNodeExtensionsDefaultResponse +> { + return context + .path("/pools/{poolId}/nodes/{nodeId}/extensions", poolId, nodeId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $select: options?.$select, + }, + }); +} + +export async function _listNodeExtensionsDeserialize( + result: ListNodeExtensions200Response | ListNodeExtensionsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + provisioningState: p["provisioningState"], + vmExtension: !p.vmExtension + ? undefined + : { + name: p.vmExtension?.["name"], + publisher: p.vmExtension?.["publisher"], + type: p.vmExtension?.["type"], + typeHandlerVersion: p.vmExtension?.["typeHandlerVersion"], + autoUpgradeMinorVersion: p.vmExtension?.["autoUpgradeMinorVersion"], + enableAutomaticUpgrade: p.vmExtension?.["enableAutomaticUpgrade"], + settings: p.vmExtension?.["settings"], + protectedSettings: p.vmExtension?.["protectedSettings"], + provisionAfterExtensions: + p.vmExtension?.["provisionAfterExtensions"], + }, + instanceView: !p.instanceView + ? undefined + : { + name: p.instanceView?.["name"], + statuses: (p.instanceView?.["statuses"] ?? []).map((p) => ({ + code: p["code"], + displayStatus: p["displayStatus"], + level: p["level"], + message: p["message"], + time: p["time"], + })), + subStatuses: (p.instanceView?.["subStatuses"] ?? []).map((p) => ({ + code: p["code"], + displayStatus: p["displayStatus"], + level: p["level"], + message: p["message"], + time: p["time"], + })), + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists the Compute Nodes Extensions in the specified Pool. */ +export async function listNodeExtensions( + context: Client, + poolId: string, + nodeId: string, + options: ListNodeExtensionsOptions = { requestOptions: {} } +): Promise { + const result = await _listNodeExtensionsSend( + context, + poolId, + nodeId, + options + ); + return _listNodeExtensionsDeserialize(result); +} + +export function _deleteNodeFileSend( + context: Client, + poolId: string, + nodeId: string, + filePath: string, + options: DeleteNodeFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path( + "/pools/{poolId}/nodes/{nodeId}/files/{filePath}", + poolId, + nodeId, + filePath + ) + .delete({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + timeOut: options?.timeOut, + recursive: options?.recursive, + }, + }); +} + +export async function _deleteNodeFileDeserialize( + result: DeleteNodeFile200Response | DeleteNodeFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Deletes the specified file from the Compute Node. 
*/ +export async function deleteNodeFile( + context: Client, + poolId: string, + nodeId: string, + filePath: string, + options: DeleteNodeFileOptions = { requestOptions: {} } +): Promise { + const result = await _deleteNodeFileSend( + context, + poolId, + nodeId, + filePath, + options + ); + return _deleteNodeFileDeserialize(result); +} + +export function _getNodeFileSend( + context: Client, + poolId: string, + nodeId: string, + filePath: string, + options: GetNodeFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path( + "/pools/{poolId}/nodes/{nodeId}/files/{filePath}", + poolId, + nodeId, + filePath + ) + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + ...(options?.ocpRange !== undefined + ? { "ocp-range": options?.ocpRange } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getNodeFileDeserialize( + result: GetNodeFile200Response | GetNodeFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return typeof result.body === "string" + ? stringToUint8Array(result.body, "base64") + : result.body; +} + +/** Returns the content of the specified Compute Node file. */ +export async function getNodeFile( + context: Client, + poolId: string, + nodeId: string, + filePath: string, + options: GetNodeFileOptions = { requestOptions: {} } +): Promise { + const result = await _getNodeFileSend( + context, + poolId, + nodeId, + filePath, + options + ); + return _getNodeFileDeserialize(result); +} + +export function _getNodeFilePropertiesSend( + context: Client, + poolId: string, + nodeId: string, + filePath: string, + options: GetNodeFilePropertiesOptions = { requestOptions: {} } +): StreamableMethod< + GetNodeFileProperties200Response | GetNodeFilePropertiesDefaultResponse +> { + return context + .path( + "/pools/{poolId}/nodes/{nodeId}/files/{filePath}", + poolId, + nodeId, + filePath + ) + .head({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifModifiedSince !== undefined + ? { "if-modified-since": options?.ifModifiedSince?.toUTCString() } + : {}), + ...(options?.ifUnmodifiedSince !== undefined + ? { "if-unmodified-since": options?.ifUnmodifiedSince?.toUTCString() } + : {}), + }, + queryParameters: { timeOut: options?.timeOut }, + }); +} + +export async function _getNodeFilePropertiesDeserialize( + result: + | GetNodeFileProperties200Response + | GetNodeFilePropertiesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return; +} + +/** Gets the properties of the specified Compute Node file. 
*/ +export async function getNodeFileProperties( + context: Client, + poolId: string, + nodeId: string, + filePath: string, + options: GetNodeFilePropertiesOptions = { requestOptions: {} } +): Promise { + const result = await _getNodeFilePropertiesSend( + context, + poolId, + nodeId, + filePath, + options + ); + return _getNodeFilePropertiesDeserialize(result); +} + +export function _listNodeFilesSend( + context: Client, + poolId: string, + nodeId: string, + options: ListNodeFilesOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/pools/{poolId}/nodes/{nodeId}/files", poolId, nodeId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + maxresults: options?.maxresults, + timeOut: options?.timeOut, + $filter: options?.$filter, + recursive: options?.recursive, + }, + }); +} + +export async function _listNodeFilesDeserialize( + result: ListNodeFiles200Response | ListNodeFilesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + value: (result.body["value"] ?? []).map((p) => ({ + name: p["name"], + url: p["url"], + isDirectory: p["isDirectory"], + properties: !p.properties + ? undefined + : { + creationTime: + p.properties?.["creationTime"] !== undefined + ? new Date(p.properties?.["creationTime"]) + : undefined, + lastModified: new Date(p.properties?.["lastModified"]), + contentLength: p.properties?.["contentLength"], + contentType: p.properties?.["contentType"], + fileMode: p.properties?.["fileMode"], + }, + })), + "odata.nextLink": result.body["odata.nextLink"], + }; +} + +/** Lists all of the files in Task directories on the specified Compute Node. */ +export async function listNodeFiles( + context: Client, + poolId: string, + nodeId: string, + options: ListNodeFilesOptions = { requestOptions: {} } +): Promise { + const result = await _listNodeFilesSend(context, poolId, nodeId, options); + return _listNodeFilesDeserialize(result); +} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/index.ts new file mode 100644 index 0000000000..5b094b02a1 --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/index.ts @@ -0,0 +1,286 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +export { BatchClient, BatchClientOptions } from "./BatchClient.js"; +export { + ApplicationListResult, + BatchApplication, + BatchError, + ErrorMessage, + BatchErrorDetail, + PoolListUsageMetricsResult, + PoolUsageMetrics, + BatchPoolCreateOptions, + CloudServiceConfiguration, + VirtualMachineConfiguration, + ImageReference, + WindowsConfiguration, + DataDisk, + CachingType, + StorageAccountType, + ContainerConfiguration, + ContainerType, + ContainerRegistry, + BatchNodeIdentityReference, + DiskEncryptionConfiguration, + DiskEncryptionTarget, + NodePlacementConfiguration, + NodePlacementPolicyType, + VMExtension, + OSDisk, + DiffDiskSettings, + DiffDiskPlacement, + NetworkConfiguration, + DynamicVNetAssignmentScope, + PoolEndpointConfiguration, + InboundNATPool, + InboundEndpointProtocol, + NetworkSecurityGroupRule, + NetworkSecurityGroupRuleAccess, + PublicIpAddressConfiguration, + IPAddressProvisioningType, + StartTask, + TaskContainerSettings, + ContainerWorkingDirectory, + ResourceFile, + EnvironmentSetting, + UserIdentity, + AutoUserSpecification, + AutoUserScope, + ElevationLevel, + CertificateReference, + CertificateStoreLocation, + CertificateVisibility, + ApplicationPackageReference, + TaskSchedulingPolicy, + BatchNodeFillType, + UserAccount, + LinuxUserConfiguration, + WindowsUserConfiguration, + LoginMode, + MetadataItem, + MountConfiguration, + AzureBlobFileSystemConfiguration, + NfsMountConfiguration, + CifsMountConfiguration, + AzureFileShareConfiguration, + NodeCommunicationMode, + BatchPoolListResult, + BatchPool, + PoolState, + AllocationState, + ResizeError, + NameValuePair, + AutoScaleRun, + AutoScaleRunError, + PoolStatistics, + UsageStatistics, + ResourceStatistics, + BatchPoolIdentity, + PoolIdentityType, + UserAssignedIdentity, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchNodeDeallocationOption, + BatchPoolReplaceOptions, + NodeRemoveOptions, + AccountListSupportedImagesResult, + ImageInformation, + OSType, + VerificationType, + PoolNodeCountsListResult, + PoolNodeCounts, + NodeCounts, + BatchJob, + JobState, + JobConstraints, + JobManagerTask, + OutputFile, + OutputFileDestination, + OutputFileBlobContainerDestination, + HttpHeader, + OutputFileUploadOptions, + OutputFileUploadCondition, + TaskConstraints, + AuthenticationTokenSettings, + AccessScope, + JobPreparationTask, + JobReleaseTask, + PoolInformation, + AutoPoolSpecification, + PoolLifetimeOption, + PoolSpecification, + OnAllTasksComplete, + OnTaskFailure, + JobNetworkConfiguration, + JobExecutionInformation, + JobSchedulingError, + ErrorCategory, + JobStatistics, + BatchJobUpdateOptions, + BatchJobDisableOptions, + DisableJobOption, + BatchJobTerminateOptions, + BatchJobCreateOptions, + BatchJobListResult, + BatchJobListPreparationAndReleaseTaskStatusResult, + JobPreparationAndReleaseTaskExecutionInformation, + JobPreparationTaskExecutionInformation, + JobPreparationTaskState, + TaskContainerExecutionInformation, + TaskFailureInformation, + TaskExecutionResult, + JobReleaseTaskExecutionInformation, + JobReleaseTaskState, + TaskCountsResult, + TaskCounts, + TaskSlotCounts, + BatchCertificate, + CertificateState, + DeleteCertificateError, + CertificateFormat, + CertificateListResult, + BatchJobSchedule, + JobScheduleState, + Schedule, + JobSpecification, + JobScheduleExecutionInformation, + RecentJob, + JobScheduleStatistics, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + BatchJobScheduleListResult, + 
BatchTaskCreateOptions, + ExitConditions, + ExitCodeMapping, + ExitOptions, + JobAction, + DependencyAction, + ExitCodeRangeMapping, + AffinityInformation, + MultiInstanceSettings, + TaskDependencies, + TaskIdRange, + BatchTaskListResult, + BatchTask, + TaskState, + TaskExecutionInformation, + BatchNodeInformation, + TaskStatistics, + BatchTaskCollection, + TaskAddCollectionResult, + TaskAddResult, + TaskAddStatus, + BatchTaskListSubtasksResult, + SubtaskInformation, + SubtaskState, + NodeFileListResult, + NodeFile, + FileProperties, + BatchNodeUserCreateOptions, + BatchNodeUserUpdateOptions, + BatchNode, + BatchNodeState, + SchedulingState, + TaskInformation, + StartTaskInformation, + StartTaskState, + BatchNodeError, + BatchNodeEndpointConfiguration, + InboundEndpoint, + NodeAgentInformation, + VirtualMachineInfo, + NodeRebootOptions, + BatchNodeRebootOption, + NodeReimageOptions, + BatchNodeReimageOption, + NodeDisableSchedulingOptions, + DisableBatchNodeSchedulingOption, + BatchNodeRemoteLoginSettingsResult, + UploadBatchServiceLogsOptions, + UploadBatchServiceLogsResult, + BatchNodeListResult, + NodeVMExtension, + VMExtensionInstanceView, + InstanceViewStatus, + StatusLevelTypes, + NodeVMExtensionList, + ListApplicationsOptions, + GetApplicationOptions, + ListPoolUsageMetricsOptions, + CreatePoolOptions, + ListPoolsOptions, + DeletePoolOptions, + PoolExistsOptions, + GetPoolOptions, + UpdatePoolOptions, + DisablePoolAutoScaleOptions, + EnablePoolAutoScaleOptions, + EvaluatePoolAutoScaleOptions, + ResizePoolOptions, + StopPoolResizeOptions, + ReplacePoolPropertiesOptions, + RemoveNodesOptions, + ListSupportedImagesOptions, + ListPoolNodeCountsOptions, + DeleteJobOptions, + GetJobOptions, + UpdateJobOptions, + ReplaceJobOptions, + DisableJobOptions, + EnableJobOptions, + TerminateJobOptions, + CreateJobOptions, + ListJobsOptions, + ListJobsFromScheduleOptions, + ListJobPreparationAndReleaseTaskStatusOptions, + GetJobTaskCountsOptions, + CreateCertificateOptions, + ListCertificatesOptions, + CancelCertificateDeletionOptions, + DeleteCertificateOptions, + GetCertificateOptions, + JobScheduleExistsOptions, + DeleteJobScheduleOptions, + GetJobScheduleOptions, + UpdateJobScheduleOptions, + ReplaceJobScheduleOptions, + DisableJobScheduleOptions, + EnableJobScheduleOptions, + TerminateJobScheduleOptions, + CreateJobScheduleOptions, + ListJobSchedulesOptions, + CreateTaskOptions, + ListTasksOptions, + CreateTaskCollectionOptions, + DeleteTaskOptions, + GetTaskOptions, + ReplaceTaskOptions, + ListSubTasksOptions, + TerminateTaskOptions, + ReactivateTaskOptions, + DeleteTaskFileOptions, + GetTaskFileOptions, + GetTaskFilePropertiesOptions, + ListTaskFilesOptions, + CreateNodeUserOptions, + DeleteNodeUserOptions, + ReplaceNodeUserOptions, + GetNodeOptions, + RebootNodeOptions, + ReimageNodeOptions, + DisableNodeSchedulingOptions, + EnableNodeSchedulingOptions, + GetNodeRemoteLoginSettingsOptions, + GetNodeRemoteDesktopFileOptions, + UploadNodeLogsOptions, + ListNodesOptions, + GetNodeExtensionOptions, + ListNodeExtensionsOptions, + DeleteNodeFileOptions, + GetNodeFileOptions, + GetNodeFilePropertiesOptions, + ListNodeFilesOptions, +} from "./models/index.js"; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/index.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/index.ts new file mode 100644 index 0000000000..f76889ad81 --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/index.ts @@ -0,0 
+1,287 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export { + ApplicationListResult, + BatchApplication, + BatchError, + ErrorMessage, + BatchErrorDetail, + PoolListUsageMetricsResult, + PoolUsageMetrics, + BatchPoolCreateOptions, + CloudServiceConfiguration, + VirtualMachineConfiguration, + ImageReference, + WindowsConfiguration, + DataDisk, + CachingType, + StorageAccountType, + ContainerConfiguration, + ContainerType, + ContainerRegistry, + BatchNodeIdentityReference, + DiskEncryptionConfiguration, + DiskEncryptionTarget, + NodePlacementConfiguration, + NodePlacementPolicyType, + VMExtension, + OSDisk, + DiffDiskSettings, + DiffDiskPlacement, + NetworkConfiguration, + DynamicVNetAssignmentScope, + PoolEndpointConfiguration, + InboundNATPool, + InboundEndpointProtocol, + NetworkSecurityGroupRule, + NetworkSecurityGroupRuleAccess, + PublicIpAddressConfiguration, + IPAddressProvisioningType, + StartTask, + TaskContainerSettings, + ContainerWorkingDirectory, + ResourceFile, + EnvironmentSetting, + UserIdentity, + AutoUserSpecification, + AutoUserScope, + ElevationLevel, + CertificateReference, + CertificateStoreLocation, + CertificateVisibility, + ApplicationPackageReference, + TaskSchedulingPolicy, + BatchNodeFillType, + UserAccount, + LinuxUserConfiguration, + WindowsUserConfiguration, + LoginMode, + MetadataItem, + MountConfiguration, + AzureBlobFileSystemConfiguration, + NfsMountConfiguration, + CifsMountConfiguration, + AzureFileShareConfiguration, + NodeCommunicationMode, + BatchPoolListResult, + BatchPool, + PoolState, + AllocationState, + ResizeError, + NameValuePair, + AutoScaleRun, + AutoScaleRunError, + PoolStatistics, + UsageStatistics, + ResourceStatistics, + BatchPoolIdentity, + PoolIdentityType, + UserAssignedIdentity, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchNodeDeallocationOption, + BatchPoolReplaceOptions, + NodeRemoveOptions, + AccountListSupportedImagesResult, + ImageInformation, + OSType, + VerificationType, + PoolNodeCountsListResult, + PoolNodeCounts, + NodeCounts, + BatchJob, + JobState, + JobConstraints, + JobManagerTask, + OutputFile, + OutputFileDestination, + OutputFileBlobContainerDestination, + HttpHeader, + OutputFileUploadOptions, + OutputFileUploadCondition, + TaskConstraints, + AuthenticationTokenSettings, + AccessScope, + JobPreparationTask, + JobReleaseTask, + PoolInformation, + AutoPoolSpecification, + PoolLifetimeOption, + PoolSpecification, + OnAllTasksComplete, + OnTaskFailure, + JobNetworkConfiguration, + JobExecutionInformation, + JobSchedulingError, + ErrorCategory, + JobStatistics, + BatchJobUpdateOptions, + BatchJobDisableOptions, + DisableJobOption, + BatchJobTerminateOptions, + BatchJobCreateOptions, + BatchJobListResult, + BatchJobListPreparationAndReleaseTaskStatusResult, + JobPreparationAndReleaseTaskExecutionInformation, + JobPreparationTaskExecutionInformation, + JobPreparationTaskState, + TaskContainerExecutionInformation, + TaskFailureInformation, + TaskExecutionResult, + JobReleaseTaskExecutionInformation, + JobReleaseTaskState, + TaskCountsResult, + TaskCounts, + TaskSlotCounts, + BatchCertificate, + CertificateState, + DeleteCertificateError, + CertificateFormat, + CertificateListResult, + BatchJobSchedule, + JobScheduleState, + Schedule, + JobSpecification, + JobScheduleExecutionInformation, + RecentJob, + JobScheduleStatistics, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + 
BatchJobScheduleListResult, + BatchTaskCreateOptions, + ExitConditions, + ExitCodeMapping, + ExitOptions, + JobAction, + DependencyAction, + ExitCodeRangeMapping, + AffinityInformation, + MultiInstanceSettings, + TaskDependencies, + TaskIdRange, + BatchTaskListResult, + BatchTask, + TaskState, + TaskExecutionInformation, + BatchNodeInformation, + TaskStatistics, + BatchTaskCollection, + TaskAddCollectionResult, + TaskAddResult, + TaskAddStatus, + BatchTaskListSubtasksResult, + SubtaskInformation, + SubtaskState, + NodeFileListResult, + NodeFile, + FileProperties, + BatchNodeUserCreateOptions, + BatchNodeUserUpdateOptions, + BatchNode, + BatchNodeState, + SchedulingState, + TaskInformation, + StartTaskInformation, + StartTaskState, + BatchNodeError, + BatchNodeEndpointConfiguration, + InboundEndpoint, + NodeAgentInformation, + VirtualMachineInfo, + NodeRebootOptions, + BatchNodeRebootOption, + NodeReimageOptions, + BatchNodeReimageOption, + NodeDisableSchedulingOptions, + DisableBatchNodeSchedulingOption, + BatchNodeRemoteLoginSettingsResult, + UploadBatchServiceLogsOptions, + UploadBatchServiceLogsResult, + BatchNodeListResult, + NodeVMExtension, + VMExtensionInstanceView, + InstanceViewStatus, + StatusLevelTypes, + NodeVMExtensionList, +} from "./models.js"; +export { + ListApplicationsOptions, + GetApplicationOptions, + ListPoolUsageMetricsOptions, + CreatePoolOptions, + ListPoolsOptions, + DeletePoolOptions, + PoolExistsOptions, + GetPoolOptions, + UpdatePoolOptions, + DisablePoolAutoScaleOptions, + EnablePoolAutoScaleOptions, + EvaluatePoolAutoScaleOptions, + ResizePoolOptions, + StopPoolResizeOptions, + ReplacePoolPropertiesOptions, + RemoveNodesOptions, + ListSupportedImagesOptions, + ListPoolNodeCountsOptions, + DeleteJobOptions, + GetJobOptions, + UpdateJobOptions, + ReplaceJobOptions, + DisableJobOptions, + EnableJobOptions, + TerminateJobOptions, + CreateJobOptions, + ListJobsOptions, + ListJobsFromScheduleOptions, + ListJobPreparationAndReleaseTaskStatusOptions, + GetJobTaskCountsOptions, + CreateCertificateOptions, + ListCertificatesOptions, + CancelCertificateDeletionOptions, + DeleteCertificateOptions, + GetCertificateOptions, + JobScheduleExistsOptions, + DeleteJobScheduleOptions, + GetJobScheduleOptions, + UpdateJobScheduleOptions, + ReplaceJobScheduleOptions, + DisableJobScheduleOptions, + EnableJobScheduleOptions, + TerminateJobScheduleOptions, + CreateJobScheduleOptions, + ListJobSchedulesOptions, + CreateTaskOptions, + ListTasksOptions, + CreateTaskCollectionOptions, + DeleteTaskOptions, + GetTaskOptions, + ReplaceTaskOptions, + ListSubTasksOptions, + TerminateTaskOptions, + ReactivateTaskOptions, + DeleteTaskFileOptions, + GetTaskFileOptions, + GetTaskFilePropertiesOptions, + ListTaskFilesOptions, + CreateNodeUserOptions, + DeleteNodeUserOptions, + ReplaceNodeUserOptions, + GetNodeOptions, + RebootNodeOptions, + ReimageNodeOptions, + DisableNodeSchedulingOptions, + EnableNodeSchedulingOptions, + GetNodeRemoteLoginSettingsOptions, + GetNodeRemoteDesktopFileOptions, + UploadNodeLogsOptions, + ListNodesOptions, + GetNodeExtensionOptions, + ListNodeExtensionsOptions, + DeleteNodeFileOptions, + GetNodeFileOptions, + GetNodeFilePropertiesOptions, + ListNodeFilesOptions, +} from "./options.js"; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/models.ts new file mode 100644 index 0000000000..69a4c6c4bf --- /dev/null +++ 
b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/models.ts @@ -0,0 +1,2631 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/** The result of listing the applications available in an Account. */ +export interface ApplicationListResult { + /** The list of applications available in the Account. */ + value?: BatchApplication[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** Contains information about an application in an Azure Batch Account. */ +export interface BatchApplication { + /** A string that uniquely identifies the application within the Account. */ + id: string; + /** The display name for the application. */ + displayName: string; + /** The list of available versions of the application. */ + versions: string[]; +} + +/** An error response received from the Azure Batch service. */ +export interface BatchError { + /** An identifier for the error. Codes are invariant and are intended to be consumed programmatically. */ + code: string; + /** A message describing the error, intended to be suitable for display in a user interface. */ + message?: ErrorMessage; + /** A collection of key-value pairs containing additional details about the error. */ + values?: BatchErrorDetail[]; +} + +/** An error message received in an Azure Batch error response. */ +export interface ErrorMessage { + /** The language code of the error message. */ + lang?: string; + /** The text of the message. */ + value?: string; +} + +/** An item of additional information included in an Azure Batch error response. */ +export interface BatchErrorDetail { + /** An identifier specifying the meaning of the Value property. */ + key?: string; + /** The additional information included with the error response. */ + value?: string; +} + +/** The result of a listing the usage metrics for an Account. */ +export interface PoolListUsageMetricsResult { + /** The Pool usage metrics data. */ + value?: PoolUsageMetrics[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** Usage metrics for a Pool across an aggregation interval. */ +export interface PoolUsageMetrics { + /** The ID of the Pool whose metrics are aggregated in this entry. */ + poolId: string; + /** The start time of the aggregation interval covered by this entry. */ + startTime: Date; + /** The end time of the aggregation interval covered by this entry. */ + endTime: Date; + /** The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + vmSize: string; + /** The total core hours used in the Pool during this aggregation interval. */ + totalCoreHours: number; +} + +/** Options for creating an Azure Batch Pool. */ +export interface BatchPoolCreateOptions { + /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). */ + id: string; + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** The size of virtual machines in the Pool. 
All virtual machines in a Pool are the same size. For information about available sizes of virtual machines for Cloud Services Pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and A2V2. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */ + vmSize: string; + /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ + cloudServiceConfiguration?: CloudServiceConfiguration; + /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. */ + virtualMachineConfiguration?: VirtualMachineConfiguration; + /** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; + /** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ + targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ + targetLowPriorityNodes?: number; + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */ + enableAutoScale?: boolean; + /** A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). */ + autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. 
The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + autoScaleEvaluationInterval?: string; + /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */ + enableInterNodeCommunication?: boolean; + /** The network configuration for the Pool. */ + networkConfiguration?: NetworkConfiguration; + /** A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. */ + startTask?: StartTask; + /** + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + certificateReferences?: CertificateReference[]; + /** The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ + applicationPackageReferences?: ApplicationPackageReference[]; + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. */ + applicationLicenses?: string[]; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ + taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + taskSchedulingPolicy?: TaskSchedulingPolicy; + /** The list of user Accounts to be created on each Compute Node in the Pool. */ + userAccounts?: UserAccount[]; + /** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; + /** Mount storage using specified file system for the entire lifetime of the pool. 
Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */ + mountConfiguration?: MountConfiguration[]; + /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + targetNodeCommunicationMode?: NodeCommunicationMode; +} + +/** + * The configuration for Compute Nodes in a Pool based on the Azure Cloud Services + * platform. + */ +export interface CloudServiceConfiguration { + /** + * Possible values are: + * 2 - OS Family 2, equivalent to Windows Server 2008 R2 + * SP1. + * 3 - OS Family 3, equivalent to Windows Server 2012. + * 4 - OS Family 4, + * equivalent to Windows Server 2012 R2. + * 5 - OS Family 5, equivalent to Windows + * Server 2016. + * 6 - OS Family 6, equivalent to Windows Server 2019. For more + * information, see Azure Guest OS Releases + * (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + */ + osFamily: string; + /** The Azure Guest OS version to be installed on the virtual machines in the Pool. The default value is * which specifies the latest operating system version for the specified OS family. */ + osVersion?: string; +} + +/** + * The configuration for Compute Nodes in a Pool based on the Azure Virtual + * Machines infrastructure. + */ +export interface VirtualMachineConfiguration { + /** A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. */ + imageReference: ImageReference; + /** The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. */ + nodeAgentSKUId: string; + /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. */ + windowsConfiguration?: WindowsConfiguration; + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ + dataDisks?: DataDisk[]; + /** + * This only applies to Images that contain the Windows operating system, and + * should only be used when you hold valid on-premises licenses for the Compute + * Nodes which will be deployed. If omitted, no on-premises licensing discount is + * applied. 
Values are: + * + * Windows_Server - The on-premises license is for Windows + * Server. + * Windows_Client - The on-premises license is for Windows Client. + * + */ + licenseType?: string; + /** The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. */ + containerConfiguration?: ContainerConfiguration; + /** The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. */ + diskEncryptionConfiguration?: DiskEncryptionConfiguration; + /** The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. */ + nodePlacementConfiguration?: NodePlacementConfiguration; + /** The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. */ + extensions?: VMExtension[]; + /** Settings for the operating system disk of the Virtual Machine. */ + osDisk?: OSDisk; +} + +/** + * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image + * Gallery Image. To get the list of all Azure Marketplace Image references + * verified by Azure Batch, see the 'List Supported Images' operation. + */ +export interface ImageReference { + /** The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. */ + publisher?: string; + /** The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. */ + offer?: string; + /** The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. */ + sku?: string; + /** The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. */ + version?: string; + /** The ARM resource identifier of the Shared Image Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Shared Image Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + virtualMachineImageId?: string; + /** The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. */ + readonly exactVersion?: string; +} + +/** Windows operating system settings to apply to the virtual machine. 
*/ +export interface WindowsConfiguration { + /** Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true. */ + enableAutomaticUpdates?: boolean; +} + +/** + * Settings which will be used by the data disks associated to Compute Nodes in + * the Pool. When using attached data disks, you need to mount and format the + * disks from within a VM to use them. + */ +export interface DataDisk { + /** The logical unit number. The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive. */ + lun: number; + /** The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. */ + caching?: CachingType; + /** The initial disk size in gigabytes. */ + diskSizeGB: number; + /** The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". */ + storageAccountType?: StorageAccountType; +} + +/** CachingType enums */ +/** "none", "readonly", "readwrite" */ +export type CachingType = string; +/** StorageAccountType enums */ +/** "standard_lrs", "premium_lrs" */ +export type StorageAccountType = string; + +/** The configuration for container-enabled Pools. */ +export interface ContainerConfiguration { + /** The container technology to be used. */ + type: ContainerType; + /** The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ + containerImageNames?: string[]; + /** Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. */ + containerRegistries?: ContainerRegistry[]; +} + +/** ContainerType enums */ +/** "dockerCompatible", "criCompatible" */ +export type ContainerType = string; + +/** A private container registry. */ +export interface ContainerRegistry { + /** The user name to log into the registry server. */ + username?: string; + /** The password to log into the registry server. */ + password?: string; + /** The registry URL. If omitted, the default is "docker.io". */ + registryServer?: string; + /** The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. */ + identityReference?: BatchNodeIdentityReference; +} + +/** + * The reference to a user assigned identity associated with the Batch pool which + * a compute node will use. + */ +export interface BatchNodeIdentityReference { + /** The ARM resource id of the user assigned identity. */ + resourceId?: string; +} + +/** + * The disk encryption configuration applied on compute nodes in the pool. Disk + * encryption configuration is not supported on Linux pool created with Shared + * Image Gallery Image. + */ +export interface DiskEncryptionConfiguration { + /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. 
*/ + targets?: DiskEncryptionTarget[]; +} + +/** DiskEncryptionTarget enums */ +/** "osdisk", "temporarydisk" */ +export type DiskEncryptionTarget = string; + +/** + * For regional placement, nodes in the pool will be allocated in the same region. + * For zonal placement, nodes in the pool will be spread across different zones + * with best effort balancing. + */ +export interface NodePlacementConfiguration { + /** Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. */ + policy?: NodePlacementPolicyType; +} + +/** NodePlacementPolicyType enums */ +/** "regional", "zonal" */ +export type NodePlacementPolicyType = string; + +/** The configuration for virtual machine extensions. */ +export interface VMExtension { + /** The name of the virtual machine extension. */ + name: string; + /** The name of the extension handler publisher. */ + publisher: string; + /** The type of the extension. */ + type: string; + /** The version of script handler. */ + typeHandlerVersion?: string; + /** Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. */ + autoUpgradeMinorVersion?: boolean; + /** Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available. */ + enableAutomaticUpgrade?: boolean; + /** JSON formatted public settings for the extension. */ + settings?: Record; + /** The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. */ + protectedSettings?: Record; + /** The collection of extension names. Collection of extension names after which this extension needs to be provisioned. */ + provisionAfterExtensions?: string[]; +} + +/** Settings for the operating system disk of the compute node (VM). */ +export interface OSDisk { + /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */ + ephemeralOSDiskSettings?: DiffDiskSettings; +} + +/** + * Specifies the ephemeral Disk Settings for the operating system disk used by the + * compute node (VM). + */ +export interface DiffDiskSettings { + /** Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */ + placement?: DiffDiskPlacement; +} + +/** AccessDiffDiskPlacementScope enums */ +/** "cachedisk" */ +export type DiffDiskPlacement = string; + +/** The network configuration for a Pool. */ +export interface NetworkConfiguration { + /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. 
The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For Pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + subnetId?: string; + /** The scope of dynamic vnet assignment. */ + dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope; + /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */ + endpointConfiguration?: PoolEndpointConfiguration; + /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */ + publicIPAddressConfiguration?: PublicIpAddressConfiguration; + /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */ + enableAcceleratedNetworking?: boolean; +} + +/** DynamicVNetAssignmentScope enums */ +/** "none", "job" */ +export type DynamicVNetAssignmentScope = string; + +/** The endpoint configuration for a Pool. */ +export interface PoolEndpointConfiguration { + /** A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. */ + inboundNATPools: InboundNATPool[]; +} + +/** + * A inbound NAT Pool that can be used to address specific ports on Compute Nodes + * in a Batch Pool externally. + */ +export interface InboundNATPool { + /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. 
Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */ + name: string; + /** The protocol of the endpoint. */ + protocol: InboundEndpointProtocol; + /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */ + backendPort: number; + /** The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */ + frontendPortRangeStart: number; + /** The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */ + frontendPortRangeEnd: number; + /** A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. */ + networkSecurityGroupRules?: NetworkSecurityGroupRule[]; +} + +/** InboundEndpointProtocol enums */ +/** "tcp", "udp" */ +export type InboundEndpointProtocol = string; + +/** A network security group rule to apply to an inbound endpoint. */ +export interface NetworkSecurityGroupRule { + /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */ + priority: number; + /** The action that should be taken for a specified IP address, subnet range or tag. */ + access: NetworkSecurityGroupRuleAccess; + /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */ + sourceAddressPrefix: string; + /** The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). 
If any other values are provided the request fails with HTTP status code 400. The default value is '*'. */ + sourcePortRanges?: string[]; +} + +/** NetworkSecurityGroupRuleAccess enums */ +/** "allow", "deny" */ +export type NetworkSecurityGroupRuleAccess = string; + +/** The public IP Address configuration of the networking configuration of a Pool. */ +export interface PublicIpAddressConfiguration { + /** The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. */ + provision?: IPAddressProvisioningType; + /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ + ipAddressIds?: string[]; +} + +/** IPAddressProvisioningType enums */ +/** "batchmanaged", "usermanaged", "nopublicipaddresses" */ +export type IPAddressProvisioningType = string; + +/** + * Batch will retry Tasks when a recovery operation is triggered on a Node. + * Examples of recovery operations include (but are not limited to) when an + * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + * Retries due to recovery operations are independent of and are not counted + * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + * retry due to a recovery operation may occur. Because of this, all Tasks should + * be idempotent. This means Tasks need to tolerate being interrupted and + * restarted without causing any corruption or duplicate data. The best practice + * for long running Tasks is to use some form of checkpointing. In some cases the + * StartTask may be re-run even though the Compute Node was not rebooted. Special + * care should be taken to avoid StartTasks which create breakaway process or + * install/launch services from the StartTask working directory, as this will + * block Batch from being able to re-run the StartTask. + */ +export interface StartTask { + /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
*/ + containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. */ + resourceFiles?: ResourceFile[]; + /** A list of environment variable settings for the StartTask. */ + environmentSettings?: EnvironmentSetting[]; + /** The user identity under which the StartTask runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ + userIdentity?: UserIdentity; + /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). */ + maxTaskRetryCount?: number; + /** Whether the Batch service should wait for the StartTask to complete successfully (that is, to exit with exit code 0) before scheduling any Tasks on the Compute Node. If true and the StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks to it. This condition can be detected via the Compute Node state and failure info details. If false, the Batch service will not wait for the StartTask to complete. In this case, other Tasks can start executing on the Compute Node while the StartTask is still running; and even if the StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is true. */ + waitForSuccess?: boolean; +} + +/** The container settings for a Task. */ +export interface TaskContainerSettings { + /** Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. */ + containerRunOptions?: string; + /** The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default. */ + imageName: string; + /** The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. */ + registry?: ContainerRegistry; + /** The location of the container Task working directory. The default is 'taskWorkingDirectory'. */ + workingDirectory?: ContainerWorkingDirectory; +} + +/** ContainerWorkingDirectory enums */ +/** "taskWorkingDirectory", "containerImageDefault" */ +export type ContainerWorkingDirectory = string; + +/** A single file or multiple files to be downloaded to a Compute Node. 
*/ +export interface ResourceFile { + /** The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. */ + autoStorageContainerName?: string; + /** The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. */ + storageContainerUrl?: string; + /** The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. */ + httpUrl?: string; + /** The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. */ + blobPrefix?: string; + /** The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..'). */ + filePath?: string; + /** The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file. */ + fileMode?: string; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by storageContainerUrl or httpUrl. */ + identityReference?: BatchNodeIdentityReference; +} + +/** An environment variable to be set on a Task process. */ +export interface EnvironmentSetting { + /** The name of the environment variable. */ + name: string; + /** The value of the environment variable. */ + value?: string; +} + +/** The definition of the user identity under which the Task is run. Specify either the userName or autoUser property, but not both. 
*/ +export interface UserIdentity { + /** The name of the user identity under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. */ + username?: string; + /** The auto user under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. */ + autoUser?: AutoUserSpecification; +} + +/** Specifies the options for the auto user that runs an Azure Batch Task. */ +export interface AutoUserSpecification { + /** The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. */ + scope?: AutoUserScope; + /** The elevation level of the auto user. The default value is nonAdmin. */ + elevationLevel?: ElevationLevel; +} + +/** AutoUserScope enums */ +/** "task", "pool" */ +export type AutoUserScope = string; +/** ElevationLevel enums */ +/** "nonadmin", "admin" */ +export type ElevationLevel = string; + +/** A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ +export interface CertificateReference { + /** The thumbprint of the Certificate. */ + thumbprint: string; + /** The algorithm with which the thumbprint is associated. This must be sha1. */ + thumbprintAlgorithm: string; + /** The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. */ + storeLocation?: CertificateStoreLocation; + /** The name of the Certificate store on the Compute Node into which to install the Certificate. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. */ + storeName?: string; + /** Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts. 
*/ + visibility?: CertificateVisibility[]; +} + +/** CertificateStoreLocation enums */ +/** "currentuser", "localmachine" */ +export type CertificateStoreLocation = string; +/** CertificateVisibility enums */ +/** "starttask", "task", "remoteuser" */ +export type CertificateVisibility = string; + +/** A reference to an Package to be deployed to Compute Nodes. */ +export interface ApplicationPackageReference { + /** The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). */ + applicationId: string; + /** The version of the application to deploy. If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error. */ + version?: string; +} + +/** Specifies how Tasks should be distributed across Compute Nodes. */ +export interface TaskSchedulingPolicy { + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + nodeFillType: BatchNodeFillType; +} + +/** BatchNodeFillType enums */ +/** "spread", "pack" */ +export type BatchNodeFillType = string; + +/** + * Properties used to create a user used to execute Tasks on an Azure Batch + * Compute Node. + */ +export interface UserAccount { + /** The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. */ + name: string; + /** The password for the user Account. */ + password: string; + /** The elevation level of the user Account. The default value is nonAdmin. */ + elevationLevel?: ElevationLevel; + /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. */ + linuxUserConfiguration?: LinuxUserConfiguration; + /** The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options. */ + windowsUserConfiguration?: WindowsUserConfiguration; +} + +/** Properties used to create a user Account on a Linux Compute Node. */ +export interface LinuxUserConfiguration { + /** The user ID of the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid. */ + uid?: number; + /** The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid. */ + gid?: number; + /** The SSH private key for the user Account. The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done). 
*/ + sshPrivateKey?: string; +} + +/** Properties used to create a user Account on a Windows Compute Node. */ +export interface WindowsUserConfiguration { + /** The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. */ + loginMode?: LoginMode; +} + +/** LoginMode enums */ +/** "batch", "interactive" */ +export type LoginMode = string; + +/** + * The Batch service does not assign any meaning to this metadata; it is solely + * for the use of user code. + */ +export interface MetadataItem { + /** The name of the metadata item. */ + name: string; + /** The value of the metadata item. */ + value: string; +} + +/** The file system to mount on each node. */ +export interface MountConfiguration { + /** The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties. */ + azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration; + /** The NFS file system to mount on each node. This property is mutually exclusive with all other properties. */ + nfsMountConfiguration?: NfsMountConfiguration; + /** The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all other properties. */ + cifsMountConfiguration?: CifsMountConfiguration; + /** The Azure File Share to mount on each node. This property is mutually exclusive with all other properties. */ + azureFileShareConfiguration?: AzureFileShareConfiguration; +} + +/** Information used to connect to an Azure Storage Container using Blobfuse. */ +export interface AzureBlobFileSystemConfiguration { + /** The Azure Storage Account name. */ + accountName: string; + /** The Azure Blob Storage Container name. */ + containerName: string; + /** The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified. */ + accountKey?: string; + /** The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified. */ + sasKey?: string; + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ + blobfuseOptions?: string; + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ + relativeMountPath: string; + /** The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified. */ + identityReference?: BatchNodeIdentityReference; +} + +/** Information used to connect to an NFS file system. */ +export interface NfsMountConfiguration { + /** The URI of the file system to mount. */ + source: string; + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ + relativeMountPath: string; + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ + mountOptions?: string; +} + +/** Information used to connect to a CIFS file system. */ +export interface CifsMountConfiguration { + /** The user to use for authentication against the CIFS file system. 
*/ + username: string; + /** The URI of the file system to mount. */ + source: string; + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ + relativeMountPath: string; + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ + mountOptions?: string; + /** The password to use for authentication against the CIFS file system. */ + password: string; +} + +/** Information used to connect to an Azure Fileshare. */ +export interface AzureFileShareConfiguration { + /** The Azure Storage account name. */ + accountName: string; + /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */ + azureFileUrl: string; + /** The Azure Storage account key. */ + accountKey: string; + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ + relativeMountPath: string; + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ + mountOptions?: string; +} + +/** NodeCommunicationMode enums */ +/** "default", "classic", "simplified" */ +export type NodeCommunicationMode = string; + +/** The result of listing the Pools in an Account. */ +export interface BatchPoolListResult { + /** The list of Pools. */ + value?: BatchPool[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** A Pool in the Azure Batch service. */ +export interface BatchPool { + /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ + readonly id?: string; + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + readonly displayName?: string; + /** The URL of the Pool. */ + readonly url?: string; + /** The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. */ + readonly eTag?: string; + /** The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. */ + readonly lastModified?: Date; + /** The creation time of the Pool. */ + readonly creationTime?: Date; + /** The current state of the Pool. */ + readonly state?: PoolState; + /** The time at which the Pool entered its current state. */ + readonly stateTransitionTime?: Date; + /** Whether the Pool is resizing. */ + readonly allocationState?: AllocationState; + /** The time at which the Pool entered its current allocation state. */ + readonly allocationStateTransitionTime?: Date; + /** The size of virtual machines in the Pool. 
All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + readonly vmSize?: string; + /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ + readonly cloudServiceConfiguration?: CloudServiceConfiguration; + /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. */ + readonly virtualMachineConfiguration?: VirtualMachineConfiguration; + /** The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. */ + readonly resizeTimeout?: string; + /** A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. */ + readonly resizeErrors?: ResizeError[]; + /** The number of dedicated Compute Nodes currently in the Pool. */ + readonly currentDedicatedNodes?: number; + /** The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. */ + readonly currentLowPriorityNodes?: number; + /** The desired number of dedicated Compute Nodes in the Pool. */ + readonly targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ + readonly targetLowPriorityNodes?: number; + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */ + readonly enableAutoScale?: boolean; + /** A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleEvaluationInterval?: string; + /** The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleRun?: AutoScaleRun; + /** Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. */ + readonly enableInterNodeCommunication?: boolean; + /** The network configuration for the Pool. */ + readonly networkConfiguration?: NetworkConfiguration; + /** A Task specified to run on each Compute Node as it joins the Pool. 
*/ + startTask?: StartTask; + /** + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + readonly certificateReferences?: CertificateReference[]; + /** The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ + readonly applicationPackageReferences?: ApplicationPackageReference[]; + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. */ + readonly applicationLicenses?: string[]; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ + readonly taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + readonly taskSchedulingPolicy?: TaskSchedulingPolicy; + /** The list of user Accounts to be created on each Compute Node in the Pool. */ + readonly userAccounts?: UserAccount[]; + /** A list of name-value pairs associated with the Pool as metadata. */ + readonly metadata?: MetadataItem[]; + /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + readonly stats?: PoolStatistics; + /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ + readonly mountConfiguration?: MountConfiguration[]; + /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + readonly identity?: BatchPoolIdentity; + /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + targetNodeCommunicationMode?: NodeCommunicationMode; + /** The current state of the pool communication mode. 
*/ + readonly currentNodeCommunicationMode?: NodeCommunicationMode; +} + +/** PoolState enums */ +/** "active", "deleting" */ +export type PoolState = string; +/** AllocationState enums */ +/** "steady", "resizing", "stopping" */ +export type AllocationState = string; + +/** An error that occurred when resizing a Pool. */ +export interface ResizeError { + /** An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Pool resize error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the Pool resize error. */ + values?: NameValuePair[]; +} + +/** Represents a name-value pair. */ +export interface NameValuePair { + /** The name in the name-value pair. */ + name?: string; + /** The value in the name-value pair. */ + value?: string; +} + +/** The results and errors from an execution of a Pool autoscale formula. */ +export interface AutoScaleRun { + /** The time at which the autoscale formula was last evaluated. */ + timestamp: Date; + /** The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons. */ + results?: string; + /** Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful. */ + error?: AutoScaleRunError; +} + +/** An error that occurred when executing or evaluating a Pool autoscale formula. */ +export interface AutoScaleRunError { + /** An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the autoscale error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the autoscale error. */ + values?: NameValuePair[]; +} + +/** Contains utilization and resource usage statistics for the lifetime of a Pool. */ +export interface PoolStatistics { + /** The URL for the statistics. */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** Statistics related to Pool usage, such as the amount of core-time used. */ + usageStats?: UsageStatistics; + /** Statistics related to resource consumption by Compute Nodes in the Pool. */ + resourceStats?: ResourceStatistics; +} + +/** Statistics related to Pool usage information. */ +export interface UsageStatistics { + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. */ + dedicatedCoreTime: string; +} + +/** Statistics related to resource consumption by Compute Nodes in a Pool. */ +export interface ResourceStatistics { + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. 
*/ + lastUpdateTime: Date; + /** The average CPU usage across all Compute Nodes in the Pool (percentage per node). */ + avgCPUPercentage: number; + /** The average memory usage in GiB across all Compute Nodes in the Pool. */ + avgMemoryGiB: number; + /** The peak memory usage in GiB across all Compute Nodes in the Pool. */ + peakMemoryGiB: number; + /** The average used disk space in GiB across all Compute Nodes in the Pool. */ + avgDiskGiB: number; + /** The peak used disk space in GiB across all Compute Nodes in the Pool. */ + peakDiskGiB: number; + /** The total number of disk read operations across all Compute Nodes in the Pool. */ + diskReadIOps: number; + /** The total number of disk write operations across all Compute Nodes in the Pool. */ + diskWriteIOps: number; + /** The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. */ + diskReadGiB: number; + /** The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. */ + diskWriteGiB: number; + /** The total amount of data in GiB of network reads across all Compute Nodes in the Pool. */ + networkReadGiB: number; + /** The total amount of data in GiB of network writes across all Compute Nodes in the Pool. */ + networkWriteGiB: number; +} + +/** The identity of the Batch pool, if configured. */ +export interface BatchPoolIdentity { + /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + type: PoolIdentityType; + /** The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + userAssignedIdentities?: UserAssignedIdentity[]; +} + +/** PoolIdentityType enums */ +/** "UserAssigned", "None" */ +export type PoolIdentityType = string; + +/** The user assigned Identity */ +export interface UserAssignedIdentity { + /** The ARM resource id of the user assigned identity. */ + resourceId: string; + /** The client id of the user assigned identity. */ + readonly clientId?: string; + /** The principal id of the user assigned identity. */ + readonly principalId?: string; +} + +/** Options for updating an Azure Batch Pool. */ +export interface BatchPoolUpdateOptions { + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. */ + startTask?: StartTask; + /** + * If this element is present, it replaces any existing Certificate references configured on the Pool. + * If omitted, any existing Certificate references are left unchanged. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. 
+ * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + certificateReferences?: CertificateReference[]; + /** A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. */ + applicationPackageReferences?: ApplicationPackageReference[]; + /** A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. */ + metadata?: MetadataItem[]; + /** The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. */ + targetNodeCommunicationMode?: NodeCommunicationMode; +} + +/** Options for enabling automatic scaling on an Azure Batch Pool. */ +export interface BatchPoolEnableAutoScaleOptions { + /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. */ + autoScaleEvaluationInterval?: string; +} + +/** Options for evaluating an automatic scaling formula on an Azure Batch Pool. */ +export interface BatchPoolEvaluateAutoScaleOptions { + /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + autoScaleFormula: string; +} + +/** Options for changing the size of an Azure Batch Pool. 
*/ +export interface BatchPoolResizeOptions { + /** The desired number of dedicated Compute Nodes in the Pool. */ + targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ + targetLowPriorityNodes?: number; + /** The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; + /** Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. */ + nodeDeallocationOption?: BatchNodeDeallocationOption; +} + +/** BatchNodeDeallocationOption enums */ +/** "requeue", "terminate", "taskcompletion", "retaineddata" */ +export type BatchNodeDeallocationOption = string; + +/** Options for replacing properties on an Azure Batch Pool. */ +export interface BatchPoolReplaceOptions { + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. */ + startTask?: StartTask; + /** + * This list replaces any existing Certificate references configured on the Pool. + * If you specify an empty collection, any existing Certificate references are removed from the Pool. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + certificateReferences: CertificateReference[]; + /** The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. */ + applicationPackageReferences: ApplicationPackageReference[]; + /** A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. */ + metadata: MetadataItem[]; + /** The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. 
*/ + targetNodeCommunicationMode?: NodeCommunicationMode; +} + +/** Options for removing nodes from an Azure Batch Pool. */ +export interface NodeRemoveOptions { + /** A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. */ + nodeList: string[]; + /** The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; + /** Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. */ + nodeDeallocationOption?: BatchNodeDeallocationOption; +} + +/** The result of listing the supported Virtual Machine Images. */ +export interface AccountListSupportedImagesResult { + /** The list of supported Virtual Machine Images. */ + value?: ImageInformation[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** + * A reference to the Azure Virtual Machines Marketplace Image and additional + * information about the Image. + */ +export interface ImageInformation { + /** The ID of the Compute Node agent SKU which the Image supports. */ + nodeAgentSKUId: string; + /** The reference to the Azure Virtual Machine's Marketplace Image. */ + imageReference: ImageReference; + /** The type of operating system (e.g. Windows or Linux) of the Image. */ + osType: OSType; + /** The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. */ + capabilities?: string[]; + /** The time when the Azure Batch service will stop accepting create Pool requests for the Image. */ + batchSupportEndOfLife?: Date; + /** Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. */ + verificationType: VerificationType; +} + +/** OSType enums */ +/** "linux", "windows" */ +export type OSType = string; +/** VerificationType enums */ +/** "verified", "unverified" */ +export type VerificationType = string; + +/** The result of listing the Compute Node counts in the Account. */ +export interface PoolNodeCountsListResult { + /** A list of Compute Node counts by Pool. */ + value?: PoolNodeCounts[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** The number of Compute Nodes in each state for a Pool. */ +export interface PoolNodeCounts { + /** The ID of the Pool. */ + poolId: string; + /** The number of dedicated Compute Nodes in each state. */ + dedicated?: NodeCounts; + /** The number of Spot/Low-priority Compute Nodes in each state. */ + lowPriority?: NodeCounts; +} + +/** The number of Compute Nodes in each Compute Node state. */ +export interface NodeCounts { + /** The number of Compute Nodes in the creating state. */ + creating: number; + /** The number of Compute Nodes in the idle state. */ + idle: number; + /** The number of Compute Nodes in the offline state. */ + offline: number; + /** The number of Compute Nodes in the preempted state. */ + preempted: number; + /** The count of Compute Nodes in the rebooting state. */ + rebooting: number; + /** The number of Compute Nodes in the reimaging state. 
*/ + reimaging: number; + /** The number of Compute Nodes in the running state. */ + running: number; + /** The number of Compute Nodes in the starting state. */ + starting: number; + /** The number of Compute Nodes in the startTaskFailed state. */ + startTaskFailed: number; + /** The number of Compute Nodes in the leavingPool state. */ + leavingPool: number; + /** The number of Compute Nodes in the unknown state. */ + unknown: number; + /** The number of Compute Nodes in the unusable state. */ + unusable: number; + /** The number of Compute Nodes in the waitingForStartTask state. */ + waitingForStartTask: number; + /** The total number of Compute Nodes. */ + total: number; +} + +/** An Azure Batch Job. */ +export interface BatchJob { + /** A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ + readonly id?: string; + /** The display name for the Job. */ + readonly displayName?: string; + /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ + readonly usesTaskDependencies?: boolean; + /** The URL of the Job. */ + readonly url?: string; + /** The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */ + readonly eTag?: string; + /** The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. */ + readonly lastModified?: Date; + /** The creation time of the Job. */ + readonly creationTime?: Date; + /** The current state of the Job. */ + readonly state?: JobState; + /** The time at which the Job entered its current state. */ + readonly stateTransitionTime?: Date; + /** The previous state of the Job. This property is not set if the Job is in its initial Active state. */ + readonly previousState?: JobState; + /** The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. */ + readonly previousStateTransitionTime?: Date; + /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */ + priority?: number; + /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ + allowTaskPreemption?: boolean; + /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ + maxParallelTasks?: number; + /** The execution constraints for the Job. */ + constraints?: JobConstraints; + /** Details of a Job Manager Task to be launched when the Job is started. 
*/ + readonly jobManagerTask?: JobManagerTask; + /** The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. */ + readonly jobPreparationTask?: JobPreparationTask; + /** The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. */ + readonly jobReleaseTask?: JobReleaseTask; + /** The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ + readonly commonEnvironmentSettings?: EnvironmentSetting[]; + /** The Pool settings associated with the Job. */ + poolInfo: PoolInformation; + /** The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. */ + onAllTasksComplete?: OnAllTasksComplete; + /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + readonly onTaskFailure?: OnTaskFailure; + /** The network configuration for the Job. */ + readonly networkConfiguration?: JobNetworkConfiguration; + /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; + /** The execution information for the Job. */ + readonly executionInfo?: JobExecutionInformation; + /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + readonly stats?: JobStatistics; +} + +/** JobState enums */ +/** "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting" */ +export type JobState = string; + +/** The execution constraints for a Job. */ +export interface JobConstraints { + /** The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run. */ + maxWallClockTime?: string; + /** The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries). 
*/ + maxTaskRetryCount?: number; +} + +/** + * Specifies details of a Job Manager Task. + * The Job Manager Task is automatically started when the Job is created. The + * Batch service tries to schedule the Job Manager Task before any other Tasks in + * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where + * Job Manager Tasks are running for as long as possible (that is, Compute Nodes + * running 'normal' Tasks are removed before Compute Nodes running Job Manager + * Tasks). When a Job Manager Task fails and needs to be restarted, the system + * tries to schedule it at the highest priority. If there are no idle Compute + * Nodes available, the system may terminate one of the running Tasks in the Pool + * and return it to the queue in order to make room for the Job Manager Task to + * restart. Note that a Job Manager Task in one Job does not have priority over + * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For + * example, if a Job Manager in a priority 0 Job needs to be restarted, it will + * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery + * operation is triggered on a Node. Examples of recovery operations include (but + * are not limited to) when an unhealthy Node is rebooted or a Compute Node + * disappeared due to host failure. Retries due to recovery operations are + * independent of and are not counted against the maxTaskRetryCount. Even if the + * maxTaskRetryCount is 0, an internal retry due to a recovery operation may + * occur. Because of this, all Tasks should be idempotent. This means Tasks need + * to tolerate being interrupted and restarted without causing any corruption or + * duplicate data. The best practice for long running Tasks is to use some form of + * checkpointing. + */ +export interface JobManagerTask { + /** A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. */ + id: string; + /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. 
Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + resourceFiles?: ResourceFile[]; + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ + outputFiles?: OutputFile[]; + /** A list of environment variable settings for the Job Manager Task. */ + environmentSettings?: EnvironmentSetting[]; + /** Constraints that apply to the Job Manager Task. */ + constraints?: TaskConstraints; + /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified. */ + requiredSlots?: number; + /** Whether completion of the Job Manager Task signifies completion of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false. */ + killJobOnCompletion?: boolean; + /** The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ + userIdentity?: UserIdentity; + /** Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true. */ + runExclusive?: boolean; + /** + * A list of Application Packages that the Batch service will deploy to the + * Compute Node before running the command line.Application Packages are + * downloaded and deployed to a shared directory, not the Task working + * directory. 
Therefore, if a referenced Application Package is already + * on the Compute Node, and is up to date, then it is not re-downloaded; + * the existing copy on the Compute Node is used. If a referenced Application + * Package cannot be installed, for example because the package has been deleted + * or because download failed, the Task fails. + */ + applicationPackageReferences?: ApplicationPackageReference[]; + /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ + authenticationTokenSettings?: AuthenticationTokenSettings; + /** Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true. */ + allowLowPriorityNode?: boolean; +} + +/** On every file uploads, Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure. */ +export interface OutputFile { + /** A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. */ + filePattern: string; + /** The destination for the output file(s). */ + destination: OutputFileDestination; + /** Additional options for the upload operation, including under what conditions to perform the upload. */ + uploadOptions: OutputFileUploadOptions; +} + +/** The destination to which a file should be uploaded. */ +export interface OutputFileDestination { + /** A location in Azure blob storage to which files are uploaded. */ + container?: OutputFileBlobContainerDestination; +} + +/** Specifies a file upload destination within an Azure blob storage container. */ +export interface OutputFileBlobContainerDestination { + /** The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. */ + path?: string; + /** The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. */ + containerUrl: string; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. 
*/ + identityReference?: BatchNodeIdentityReference; + /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ + uploadHeaders?: HttpHeader[]; +} + +/** An HTTP header name-value pair */ +export interface HttpHeader { + /** The case-insensitive name of the header to be used while uploading output files. */ + name: string; + /** The value of the header to be used while uploading output files. */ + value?: string; +} + +/** + * Options for an output file upload operation, including under what conditions + * to perform the upload. + */ +export interface OutputFileUploadOptions { + /** The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. */ + uploadCondition: OutputFileUploadCondition; +} + +/** OutputFileUploadCondition enums */ +/** "tasksuccess", "taskfailure", "taskcompletion" */ +export type OutputFileUploadCondition = string; + +/** Execution constraints to apply to a Task. */ +export interface TaskConstraints { + /** The maximum elapsed time that the Task may run, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run. */ + maxWallClockTime?: string; + /** The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */ + retentionTime?: string; + /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). */ + maxTaskRetryCount?: number; +} + +/** + * The settings for an authentication token that the Task can use to perform Batch + * service operations. + */ +export interface AuthenticationTokenSettings { + /** The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. */ + access?: AccessScope[]; +} + +/** AccessScope enums */ +/** "job" */ +export type AccessScope = string; + +/** + * A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. + * You can use Job Preparation to prepare a Node to run Tasks for the Job. + * Activities commonly performed in Job Preparation include: Downloading common + * resource files used by all the Tasks in the Job. 
The Job Preparation Task can + * download these common resource files to the shared location on the Node. + * (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the Node so + * that all Tasks of that Job can communicate with it. If the Job Preparation Task + * fails (that is, exhausts its retry count before exiting with exit code 0), + * Batch will not run Tasks of this Job on the Node. The Compute Node remains + * ineligible to run Tasks of this Job until it is reimaged. The Compute Node + * remains active and can be used for other Jobs. The Job Preparation Task can run + * multiple times on the same Node. Therefore, you should write the Job + * Preparation Task to handle re-execution. If the Node is rebooted, the Job + * Preparation Task is run again on the Compute Node before scheduling any other + * Task of the Job, if rerunOnNodeRebootAfterSuccess is true or if the Job + * Preparation Task did not previously complete. If the Node is reimaged, the Job + * Preparation Task is run again before scheduling any Task of the Job. Batch will + * retry Tasks when a recovery operation is triggered on a Node. Examples of + * recovery operations include (but are not limited to) when an unhealthy Node is + * rebooted or a Compute Node disappeared due to host failure. Retries due to + * recovery operations are independent of and are not counted against the + * maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to + * a recovery operation may occur. Because of this, all Tasks should be + * idempotent. This means Tasks need to tolerate being interrupted and restarted + * without causing any corruption or duplicate data. The best practice for long + * running Tasks is to use some form of checkpointing. + */ +export interface JobPreparationTask { + /** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ + id?: string; + /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. 
Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + resourceFiles?: ResourceFile[]; + /** A list of environment variable settings for the Job Preparation Task. */ + environmentSettings?: EnvironmentSetting[]; + /** Constraints that apply to the Job Preparation Task. */ + constraints?: TaskConstraints; + /** Whether the Batch service should wait for the Job Preparation Task to complete successfully before scheduling any other Tasks of the Job on the Compute Node. A Job Preparation Task has completed successfully if it exits with exit code 0. If true and the Job Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as specified in the constraints element). If the Task has still not completed successfully after all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start executing on the Compute Node while the Job Preparation Task is still running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The default value is true. */ + waitForSuccess?: boolean; + /** The user identity under which the Job Preparation Task runs. If omitted, the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux Compute Nodes. */ + userIdentity?: UserIdentity; + /** Whether the Batch service should rerun the Job Preparation Task after a Compute Node reboots. The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the reboot occurred while the Task was running). Therefore, you should always write a Job Preparation Task to be idempotent and to behave correctly if run multiple times. The default value is true. */ + rerunOnNodeRebootAfterSuccess?: boolean; +} + +/** + * A Job Release Task to run on Job completion on any Compute Node where the Job has run. + * The Job Release Task runs when the Job ends, because of one of the following: + * The user calls the Terminate Job API, or the Delete Job API while the Job is + * still active, the Job's maximum wall clock time constraint is reached, and the + * Job is still active, or the Job's Job Manager Task completed, and the Job is + * configured to terminate when the Job Manager completes. The Job Release Task + * runs on each Node where Tasks of the Job have run and the Job Preparation Task + * ran and completed. 
If you reimage a Node after it has run the Job Preparation + * Task, and the Job ends without any further Tasks of the Job running on that + * Node (and hence the Job Preparation Task does not re-run), then the Job Release + * Task does not run on that Compute Node. If a Node reboots while the Job Release + * Task is still running, the Job Release Task runs again when the Compute Node + * starts up. The Job is not marked as complete until all Job Release Tasks have + * completed. The Job Release Task runs in the background. It does not occupy a + * scheduling slot; that is, it does not count towards the taskSlotsPerNode limit + * specified on the Pool. + */ +export interface JobReleaseTask { + /** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ + id?: string; + /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. */ + resourceFiles?: ResourceFile[]; + /** A list of environment variable settings for the Job Release Task. */ + environmentSettings?: EnvironmentSetting[]; + /** The maximum elapsed time that the Job Release Task may run on a given Compute Node, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. The default value is 15 minutes. You may not specify a timeout longer than 15 minutes. 
If you do, the Batch service rejects it with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + maxWallClockTime?: string; + /** The minimum time to retain the Task directory for the Job Release Task on the Compute Node. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */ + retentionTime?: string; + /** The user identity under which the Job Release Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ + userIdentity?: UserIdentity; +} + +/** Specifies how a Job should be assigned to a Pool. */ +export interface PoolInformation { + /** The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both. */ + poolId?: string; + /** Characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. If auto Pool creation fails, the Batch service moves the Job to a completed state, and the Pool creation error is set in the Job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool specification, but not both. */ + autoPoolSpecification?: AutoPoolSpecification; +} + +/** + * Specifies characteristics for a temporary 'auto pool'. The Batch service will + * create this auto Pool when the Job is submitted. + */ +export interface AutoPoolSpecification { + /** A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. */ + autoPoolIdPrefix?: string; + /** The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. */ + poolLifetimeOption: PoolLifetimeOption; + /** Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. */ + keepAlive?: boolean; + /** The Pool specification for the auto Pool. */ + pool?: PoolSpecification; +} + +/** PoolLifetimeOption enums */ +/** "jobschedule", "job" */ +export type PoolLifetimeOption = string; + +/** Specification for creating a new Pool. */ +export interface PoolSpecification { + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. 
*/ + displayName?: string; + /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + vmSize: string; + /** The cloud service configuration for the Pool. This property must be specified if the Pool needs to be created with Azure PaaS VMs. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ + cloudServiceConfiguration?: CloudServiceConfiguration; + /** The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + virtualMachineConfiguration?: VirtualMachineConfiguration; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ + taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + taskSchedulingPolicy?: TaskSchedulingPolicy; + /** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; + /** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ + targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ + targetLowPriorityNodes?: number; + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false. */ + enableAutoScale?: boolean; + /** The formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. 
*/ + autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + autoScaleEvaluationInterval?: string; + /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */ + enableInterNodeCommunication?: boolean; + /** The network configuration for the Pool. */ + networkConfiguration?: NetworkConfiguration; + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. */ + startTask?: StartTask; + /** + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. + * Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + certificateReferences?: CertificateReference[]; + /** The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ + applicationPackageReferences?: ApplicationPackageReference[]; + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. The permitted licenses available on the Pool are 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each application license added to the Pool. */ + applicationLicenses?: string[]; + /** The list of user Accounts to be created on each Compute Node in the Pool. */ + userAccounts?: UserAccount[]; + /** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; + /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. 
*/ + mountConfiguration?: MountConfiguration[]; + /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + targetNodeCommunicationMode?: NodeCommunicationMode; +} + +/** The action the Batch service should take when all Tasks in the Job are in the completed state. */ +/** "noaction", "terminatejob" */ +export type OnAllTasksComplete = string; +/** OnTaskFailure enums */ +/** "noaction", "performexitoptionsjobaction" */ +export type OnTaskFailure = string; + +/** The network configuration for the Job. */ +export interface JobNetworkConfiguration { + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + subnetId: string; +} + +/** Contains information about the execution of a Job in the Azure Batch service. */ +export interface JobExecutionInformation { + /** The start time of the Job. This is the time at which the Job was created. */ + startTime: Date; + /** The completion time of the Job. This property is set only if the Job is in the completed state. */ + endTime?: Date; + /** The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool. */ + poolId?: string; + /** Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job. */ + schedulingError?: JobSchedulingError; + /** A string describing the reason the Job ended. This property is set only if the Job is in the completed state. 
If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation. */ + terminateReason?: string; +} + +/** An error encountered by the Batch service when scheduling a Job. */ +export interface JobSchedulingError { + /** The category of the Job scheduling error. */ + category: ErrorCategory; + /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Job scheduling error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the scheduling error. */ + details?: NameValuePair[]; +} + +/** ErrorCategory enums */ +/** "usererror", "servererror" */ +export type ErrorCategory = string; + +/** Resource usage statistics for a Job. */ +export interface JobStatistics { + /** The URL of the statistics. */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ + userCPUTime: string; + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ + kernelCPUTime: string; + /** The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */ + wallClockTime: string; + /** The total number of disk read operations made by all Tasks in the Job. */ + readIOps: number; + /** The total number of disk write operations made by all Tasks in the Job. */ + writeIOps: number; + /** The total amount of data in GiB read from disk by all Tasks in the Job. */ + readIOGiB: number; + /** The total amount of data in GiB written to disk by all Tasks in the Job. */ + writeIOGiB: number; + /** The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. */ + numSucceededTasks: number; + /** The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. */ + numFailedTasks: number; + /** The total number of retries on all the Tasks in the Job during the given time range. */ + numTaskRetries: number; + /** The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. 
(If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */ + waitTime: string; +} + +/** Options for updating an Azure Batch Job. */ +export interface BatchJobUpdateOptions { + /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. */ + priority?: number; + /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ + allowTaskPreemption?: boolean; + /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ + maxParallelTasks?: number; + /** The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. */ + constraints?: JobConstraints; + /** The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. */ + poolInfo?: PoolInformation; + /** The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + onAllTasksComplete?: OnAllTasksComplete; + /** A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. */ + metadata?: MetadataItem[]; +} + +/** Options for disabling an Azure Batch Job. */ +export interface BatchJobDisableOptions { + /** What to do with active Tasks associated with the Job. */ + disableTasks: DisableJobOption; +} + +/** DisableJobOption enums */ +/** "requeue", "terminate", "wait" */ +export type DisableJobOption = string; + +/** Options for terminating an Azure Batch Job. */ +export interface BatchJobTerminateOptions { + /** The text you want to appear as the Job's TerminateReason. The default is 'UserTerminate'. */ + terminateReason?: string; +} + +/** Options for creating an Azure Batch Job. */ +export interface BatchJobCreateOptions { + /** A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. 
The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ + id: string; + /** The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ + usesTaskDependencies?: boolean; + /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */ + priority?: number; + /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ + allowTaskPreemption?: boolean; + /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ + maxParallelTasks?: number; + /** The execution constraints for the Job. */ + constraints?: JobConstraints; + /** Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. */ + jobManagerTask?: JobManagerTask; + /** The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */ + jobPreparationTask?: JobPreparationTask; + /** The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. */ + jobReleaseTask?: JobReleaseTask; + /** The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. 
*/ + commonEnvironmentSettings?: EnvironmentSetting[]; + /** The Pool on which the Batch service runs the Job's Tasks. */ + poolInfo: PoolInformation; + /** The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + onAllTasksComplete?: OnAllTasksComplete; + /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + onTaskFailure?: OnTaskFailure; + /** The network configuration for the Job. */ + networkConfiguration?: JobNetworkConfiguration; + /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; +} + +/** The result of listing the Jobs in an Account. */ +export interface BatchJobListResult { + /** The list of Jobs. */ + value?: BatchJob[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** + * The result of listing the status of the Job Preparation and Job Release Tasks + * for a Job. + */ +export interface BatchJobListPreparationAndReleaseTaskStatusResult { + /** A list of Job Preparation and Job Release Task execution information. */ + value?: JobPreparationAndReleaseTaskExecutionInformation[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** The status of the Job Preparation and Job Release Tasks on a Compute Node. */ +export interface JobPreparationAndReleaseTaskExecutionInformation { + /** The ID of the Pool containing the Compute Node to which this entry refers. */ + poolId?: string; + /** The ID of the Compute Node to which this entry refers. */ + nodeId?: string; + /** The URL of the Compute Node to which this entry refers. */ + nodeUrl?: string; + /** Information about the execution status of the Job Preparation Task on this Compute Node. */ + jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformation; + /** Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. */ + jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformation; +} + +/** + * Contains information about the execution of a Job Preparation Task on a Compute + * Node. + */ +export interface JobPreparationTaskExecutionInformation { + /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */ + startTime: Date; + /** The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. */ + endTime?: Date; + /** The current state of the Job Preparation Task on the Compute Node. */ + state: JobPreparationTaskState; + /** The root directory of the Job Preparation Task on the Compute Node. 
You can use this path to retrieve files created by the Task, such as log files. */ + taskRootDirectory?: string; + /** The URL to the root directory of the Job Preparation Task on the Compute Node. */ + taskRootDirectoryUrl?: string; + /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */ + exitCode?: number; + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ + containerInfo?: TaskContainerExecutionInformation; + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ + failureInfo?: TaskFailureInformation; + /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ + retryCount: number; + /** The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ + lastRetryTime?: Date; + /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + result?: TaskExecutionResult; +} + +/** JobPreparationTaskState enums */ +/** "running", "completed" */ +export type JobPreparationTaskState = string; + +/** Contains information about the container which a Task is executing. */ +export interface TaskContainerExecutionInformation { + /** The ID of the container. */ + containerId?: string; + /** The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect". */ + state?: string; + /** Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect". */ + error?: string; +} + +/** Information about a Task failure. */ +export interface TaskFailureInformation { + /** The category of the Task error. */ + category: ErrorCategory; + /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Task error, intended to be suitable for display in a user interface. 
*/ + message?: string; + /** A list of additional details related to the error. */ + details?: NameValuePair[]; +} + +/** TaskExecutionResult enums */ +/** "success", "failure" */ +export type TaskExecutionResult = string; + +/** + * Contains information about the execution of a Job Release Task on a Compute + * Node. + */ +export interface JobReleaseTaskExecutionInformation { + /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */ + startTime: Date; + /** The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. */ + endTime?: Date; + /** The current state of the Job Release Task on the Compute Node. */ + state: JobReleaseTaskState; + /** The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ + taskRootDirectory?: string; + /** The URL to the root directory of the Job Release Task on the Compute Node. */ + taskRootDirectoryUrl?: string; + /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */ + exitCode?: number; + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ + containerInfo?: TaskContainerExecutionInformation; + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ + failureInfo?: TaskFailureInformation; + /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + result?: TaskExecutionResult; +} + +/** JobReleaseTaskState enums */ +/** "running", "completed" */ +export type JobReleaseTaskState = string; + +/** The Task and TaskSlot counts for a Job. */ +export interface TaskCountsResult { + /** The number of Tasks per state. */ + taskCounts: TaskCounts; + /** The number of TaskSlots required by Tasks per state. */ + taskSlotCounts: TaskSlotCounts; +} + +/** The Task counts for a Job. */ +export interface TaskCounts { + /** The number of Tasks in the active state. */ + active: number; + /** The number of Tasks in the running or preparing state. */ + running: number; + /** The number of Tasks in the completed state. */ + completed: number; + /** The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'. */ + succeeded: number; + /** The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'. */ + failed: number; +} + +/** The TaskSlot counts for a Job. */ +export interface TaskSlotCounts { + /** The number of TaskSlots for active Tasks. */ + active: number; + /** The number of TaskSlots for running Tasks. */ + running: number; + /** The number of TaskSlots for completed Tasks. */ + completed: number; + /** The number of TaskSlots for succeeded Tasks. 
*/ + succeeded: number; + /** The number of TaskSlots for failed Tasks. */ + failed: number; +} + +/** + * A Certificate that can be installed on Compute Nodes and can be used to + * authenticate operations on the machine. + */ +export interface BatchCertificate { + /** The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). */ + thumbprint: string; + /** The algorithm used to derive the thumbprint. This must be sha1. */ + thumbprintAlgorithm: string; + /** The URL of the Certificate. */ + readonly url?: string; + /** The state of the Certificate. */ + readonly state?: CertificateState; + /** The time at which the Certificate entered its current state. */ + readonly stateTransitionTime?: Date; + /** The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. */ + readonly previousState?: CertificateState; + /** The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. */ + readonly previousStateTransitionTime?: Date; + /** The public part of the Certificate as a base-64 encoded .cer file. */ + readonly publicData?: Uint8Array; + /** The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. */ + readonly deleteCertificateError?: DeleteCertificateError; + /** The base64-encoded contents of the Certificate. The maximum size is 10KB. */ + data: Uint8Array; + /** The format of the Certificate data. */ + certificateFormat?: CertificateFormat; + /** The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. */ + password?: string; +} + +/** CertificateState enums */ +/** "active", "deleting", "deletefailed" */ +export type CertificateState = string; + +/** An error encountered by the Batch service when deleting a Certificate. */ +export interface DeleteCertificateError { + /** An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Certificate deletion error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. */ + values?: NameValuePair[]; +} + +/** CertificateFormat enums */ +/** "pfx", "cer" */ +export type CertificateFormat = string; + +/** The result of listing the Certificates in the Account. */ +export interface CertificateListResult { + /** The list of Certificates. */ + value?: BatchCertificate[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** + * A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a + * specification used to create each Job. + */ +export interface BatchJobSchedule { + /** A string that uniquely identifies the schedule within the Account. */ + readonly id?: string; + /** The display name for the schedule. */ + readonly displayName?: string; + /** The URL of the Job Schedule. */ + readonly url?: string; + /** The ETag of the Job Schedule. This is an opaque string. 
You can use it to detect whether the Job Schedule has changed between requests. In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. */ + readonly eTag?: string; + /** The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. */ + readonly lastModified?: Date; + /** The creation time of the Job Schedule. */ + readonly creationTime?: Date; + /** The current state of the Job Schedule. */ + readonly state?: JobScheduleState; + /** The time at which the Job Schedule entered the current state. */ + readonly stateTransitionTime?: Date; + /** The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state. */ + readonly previousState?: JobScheduleState; + /** The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state. */ + readonly previousStateTransitionTime?: Date; + /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. */ + schedule: Schedule; + /** The details of the Jobs to be created on this schedule. */ + jobSpecification: JobSpecification; + /** Information about Jobs that have been and will be run under this schedule. */ + readonly executionInfo?: JobScheduleExecutionInformation; + /** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; + /** The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + readonly stats?: JobScheduleStatistics; +} + +/** JobScheduleState enums */ +/** "active", "completed", "disabled", "terminating", "deleting" */ +export type JobScheduleState = string; + +/** + * The schedule according to which Jobs will be created. All times are fixed + * respective to UTC and are not impacted by daylight saving time. + */ +export interface Schedule { + /** The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately. */ + doNotRunUntil?: Date; + /** A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it. */ + doNotRunAfter?: Date; + /** The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. 
If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + startWindow?: string; + /** The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time. Because a Job Schedule can have at most one active Job under it at any given time, if it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + recurrenceInterval?: string; +} + +/** Specifies details of the Jobs to be created on a schedule. */ +export interface JobSpecification { + /** The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created using by using the update Job API. */ + priority?: number; + /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ + allowTaskPreemption?: boolean; + /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ + maxParallelTasks?: number; + /** The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ + usesTaskDependencies?: boolean; + /** The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. 
This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + onAllTasksComplete?: OnAllTasksComplete; + /** The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + onTaskFailure?: OnTaskFailure; + /** The network configuration for the Job. */ + networkConfiguration?: JobNetworkConfiguration; + /** The execution constraints for Jobs created under this schedule. */ + constraints?: JobConstraints; + /** The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. */ + jobManagerTask?: JobManagerTask; + /** The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */ + jobPreparationTask?: JobPreparationTask; + /** The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task. */ + jobReleaseTask?: JobReleaseTask; + /** A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ + commonEnvironmentSettings?: EnvironmentSetting[]; + /** The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. */ + poolInfo: PoolInformation; + /** A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; +} + +/** + * Contains information about Jobs that have been and will be run under a Job + * Schedule. + */ +export interface JobScheduleExecutionInformation { + /** The next time at which a Job will be created under this schedule. This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before then. 
*/ + nextRunTime?: Date; + /** Information about the most recent Job under the Job Schedule. This property is present only if the at least one Job has run under the schedule. */ + recentJob?: RecentJob; + /** The time at which the schedule ended. This property is set only if the Job Schedule is in the completed state. */ + endTime?: Date; +} + +/** Information about the most recent Job to run under the Job Schedule. */ +export interface RecentJob { + /** The ID of the Job. */ + id?: string; + /** The URL of the Job. */ + url?: string; +} + +/** Resource usage statistics for a Job Schedule. */ +export interface JobScheduleStatistics { + /** The URL of the statistics. */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. */ + userCPUTime: string; + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. */ + kernelCPUTime: string; + /** The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */ + wallClockTime: string; + /** The total number of disk read operations made by all Tasks in all Jobs created under the schedule. */ + readIOps: number; + /** The total number of disk write operations made by all Tasks in all Jobs created under the schedule. */ + writeIOps: number; + /** The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. */ + readIOGiB: number; + /** The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. */ + writeIOGiB: number; + /** The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. */ + numSucceededTasks: number; + /** The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. */ + numFailedTasks: number; + /** The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. */ + numTaskRetries: number; + /** The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */ + waitTime: string; +} + +/** Options for updating an Azure Batch Job Schedule. */ +export interface BatchJobScheduleUpdateOptions { + /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged. 
*/ + schedule?: Schedule; + /** The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification. */ + jobSpecification?: JobSpecification; + /** A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. */ + metadata?: MetadataItem[]; +} + +/** Options for creating an Azure Batch Job Schedule */ +export interface BatchJobScheduleCreateOptions { + /** A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ + id: string; + /** The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. */ + schedule: Schedule; + /** The details of the Jobs to be created on this schedule. */ + jobSpecification: JobSpecification; + /** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; +} + +/** The result of listing the Job Schedules in an Account. */ +export interface BatchJobScheduleListResult { + /** The list of Job Schedules. */ + value?: BatchJobSchedule[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** Options for creating an Azure Batch Task. */ +export interface BatchTaskCreateOptions { + /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). */ + id: string; + /** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** How the Batch service should respond when the Task completes. */ + exitConditions?: ExitConditions; + /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. 
If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + resourceFiles?: ResourceFile[]; + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ + outputFiles?: OutputFile[]; + /** A list of environment variable settings for the Task. */ + environmentSettings?: EnvironmentSetting[]; + /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */ + affinityInfo?: AffinityInformation; + /** The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. */ + constraints?: TaskConstraints; + /** The number of scheduling slots that the Task required to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */ + requiredSlots?: number; + /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ + userIdentity?: UserIdentity; + /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */ + multiInstanceSettings?: MultiInstanceSettings; + /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. */ + dependsOn?: TaskDependencies; + /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. 
*/ + applicationPackageReferences?: ApplicationPackageReference[]; + /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ + authenticationTokenSettings?: AuthenticationTokenSettings; +} + +/** Specifies how the Batch service should respond when the Task completes. */ +export interface ExitConditions { + /** A list of individual Task exit codes and how the Batch service should respond to them. */ + exitCodes?: ExitCodeMapping[]; + /** A list of Task exit code ranges and how the Batch service should respond to them. */ + exitCodeRanges?: ExitCodeRangeMapping[]; + /** How the Batch service should respond if the Task fails to start due to an error. */ + preProcessingError?: ExitOptions; + /** How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. */ + fileUploadError?: ExitOptions; + /** How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. */ + default?: ExitOptions; +} + +/** + * How the Batch service should respond if a Task exits with a particular exit + * code. + */ +export interface ExitCodeMapping { + /** A process exit code. */ + code: number; + /** How the Batch service should respond if the Task exits with this exit code. */ + exitOptions: ExitOptions; +} + +/** Specifies how the Batch service responds to a particular exit condition. */ +export interface ExitOptions { + /** An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + jobAction?: JobAction; + /** An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. 
*/ + dependencyAction?: DependencyAction; +} + +/** JobAction enums */ +/** "none", "disable", "terminate" */ +export type JobAction = string; +/** DependencyAction enums */ +/** "satisfy", "block" */ +export type DependencyAction = string; + +/** + * A range of exit codes and how the Batch service should respond to exit codes + * within that range. + */ +export interface ExitCodeRangeMapping { + /** The first exit code in the range. */ + start: number; + /** The last exit code in the range. */ + end: number; + /** How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). */ + exitOptions: ExitOptions; +} + +/** + * A locality hint that can be used by the Batch service to select a Compute Node + * on which to start a Task. + */ +export interface AffinityInformation { + /** An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ + affinityId: string; +} + +/** + * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, + * if any of the subtasks fail (for example due to exiting with a non-zero exit + * code) the entire multi-instance Task fails. The multi-instance Task is then + * terminated and retried, up to its retry limit. + */ +export interface MultiInstanceSettings { + /** The number of Compute Nodes required by the Task. If omitted, the default is 1. */ + numberOfInstances?: number; + /** The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. */ + coordinationCommandLine: string; + /** A list of files that the Batch service will download before running the coordination command line. The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + commonResourceFiles?: ResourceFile[]; +} + +/** + * Specifies any dependencies of a Task. Any Task that is explicitly specified or + * within a dependency range must complete before the dependant Task will be + * scheduled. + */ +export interface TaskDependencies { + /** The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead. 
*/ + taskIds?: string[]; + /** The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled. */ + taskIdRanges?: TaskIdRange[]; +} + +/** + * The start and end of the range are inclusive. For example, if a range has start + * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. + */ +export interface TaskIdRange { + /** The first Task ID in the range. */ + start: number; + /** The last Task ID in the range. */ + end: number; +} + +/** The result of listing the Tasks in a Job. */ +export interface BatchTaskListResult { + /** The list of Tasks. */ + value?: BatchTask[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** + * Batch will retry Tasks when a recovery operation is triggered on a Node. + * Examples of recovery operations include (but are not limited to) when an + * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + * Retries due to recovery operations are independent of and are not counted + * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + * retry due to a recovery operation may occur. Because of this, all Tasks should + * be idempotent. This means Tasks need to tolerate being interrupted and + * restarted without causing any corruption or duplicate data. The best practice + * for long running Tasks is to use some form of checkpointing. + */ +export interface BatchTask { + /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. */ + readonly id?: string; + /** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + readonly displayName?: string; + /** The URL of the Task. */ + readonly url?: string; + /** The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. */ + readonly eTag?: string; + /** The last modified time of the Task. */ + readonly lastModified?: Date; + /** The creation time of the Task. */ + readonly creationTime?: Date; + /** How the Batch service should respond when the Task completes. */ + readonly exitConditions?: ExitConditions; + /** The current state of the Task. */ + readonly state?: TaskState; + /** The time at which the Task entered its current state. */ + readonly stateTransitionTime?: Date; + /** The previous state of the Task. This property is not set if the Task is in its initial Active state. */ + readonly previousState?: TaskState; + /** The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. */ + readonly previousStateTransitionTime?: Date; + /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + readonly commandLine?: string; + /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + readonly containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + readonly resourceFiles?: ResourceFile[]; + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ + readonly outputFiles?: OutputFile[]; + /** A list of environment variable settings for the Task. */ + readonly environmentSettings?: EnvironmentSetting[]; + /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */ + readonly affinityInfo?: AffinityInformation; + /** The execution constraints that apply to this Task. */ + constraints?: TaskConstraints; + /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */ + readonly requiredSlots?: number; + /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ + readonly userIdentity?: UserIdentity; + /** Information about the execution of the Task. */ + readonly executionInfo?: TaskExecutionInformation; + /** Information about the Compute Node on which the Task ran. */ + readonly nodeInfo?: BatchNodeInformation; + /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */ + readonly multiInstanceSettings?: MultiInstanceSettings; + /** Resource usage statistics for the Task. */ + readonly stats?: TaskStatistics; + /** The Tasks that this Task depends on. 
This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. */ + readonly dependsOn?: TaskDependencies; + /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */ + readonly applicationPackageReferences?: ApplicationPackageReference[]; + /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ + readonly authenticationTokenSettings?: AuthenticationTokenSettings; +} + +/** TaskState enums */ +/** "active", "preparing", "running", "completed" */ +export type TaskState = string; + +/** Information about the execution of a Task. */ +export interface TaskExecutionInformation { + /** The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state. */ + startTime?: Date; + /** The time at which the Task completed. This property is set only if the Task is in the Completed state. */ + endTime?: Date; + /** The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ + exitCode?: number; + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ + containerInfo?: TaskContainerExecutionInformation; + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ + failureInfo?: TaskFailureInformation; + /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. 
The Batch service will retry the Task up to the limit specified by the constraints. */ + retryCount: number; + /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ + lastRetryTime?: Date; + /** The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons. */ + requeueCount: number; + /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ + lastRequeueTime?: Date; + /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + result?: TaskExecutionResult; +} + +/** Information about the Compute Node on which a Task ran. */ +export interface BatchNodeInformation { + /** An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. */ + affinityId?: string; + /** The URL of the Compute Node on which the Task ran. */ + nodeUrl?: string; + /** The ID of the Pool on which the Task ran. */ + poolId?: string; + /** The ID of the Compute Node on which the Task ran. */ + nodeId?: string; + /** The root directory of the Task on the Compute Node. */ + taskRootDirectory?: string; + /** The URL to the root directory of the Task on the Compute Node. */ + taskRootDirectoryUrl?: string; +} + +/** Resource usage statistics for a Task. */ +export interface TaskStatistics { + /** The URL of the statistics. */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */ + userCPUTime: string; + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */ + kernelCPUTime: string; + /** The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. */ + wallClockTime: string; + /** The total number of disk read operations made by the Task. */ + readIOps: number; + /** The total number of disk write operations made by the Task. */ + writeIOps: number; + /** The total gibibytes read from disk by the Task. */ + readIOGiB: number; + /** The total gibibytes written to disk by the Task. */ + writeIOGiB: number; + /** The total wait time of the Task. 
The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). */ + waitTime: string; +} + +/** A collection of Azure Batch Tasks to add. */ +export interface BatchTaskCollection { + /** The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. */ + value: BatchTaskCreateOptions[]; +} + +/** The result of adding a collection of Tasks to a Job. */ +export interface TaskAddCollectionResult { + /** The results of the add Task collection operation. */ + value?: TaskAddResult[]; +} + +/** Result for a single Task added as part of an add Task collection operation. */ +export interface TaskAddResult { + /** The status of the add Task request. */ + status: TaskAddStatus; + /** The ID of the Task for which this is the result. */ + taskId: string; + /** The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can be pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */ + eTag?: string; + /** The last modified time of the Task. */ + lastModified?: Date; + /** The URL of the Task, if the Task was successfully added. */ + location?: string; + /** The error encountered while attempting to add the Task. */ + error?: BatchError; +} + +/** TaskAddStatus enums */ +/** "Success", "clienterror", "servererror" */ +export type TaskAddStatus = string; + +/** The result of listing the subtasks of a Task. */ +export interface BatchTaskListSubtasksResult { + /** The list of subtasks. */ + value?: SubtaskInformation[]; +} + +/** Information about an Azure Batch subtask. */ +export interface SubtaskInformation { + /** The ID of the subtask. */ + id?: number; + /** Information about the Compute Node on which the subtask ran. */ + nodeInfo?: BatchNodeInformation; + /** The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running. */ + startTime?: Date; + /** The time at which the subtask completed. This property is set only if the subtask is in the Completed state. */ + endTime?: Date; + /** The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ + exitCode?: number; + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ + containerInfo?: TaskContainerExecutionInformation; + /** Information describing the Task failure, if any. 
This property is set only if the Task is in the completed state and encountered a failure. */ + failureInfo?: TaskFailureInformation; + /** The current state of the subtask. */ + state?: SubtaskState; + /** The time at which the subtask entered its current state. */ + stateTransitionTime?: Date; + /** The previous state of the subtask. This property is not set if the subtask is in its initial running state. */ + previousState?: SubtaskState; + /** The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. */ + previousStateTransitionTime?: Date; + /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + result?: TaskExecutionResult; +} + +/** SubtaskState enums */ +/** "preparing", "running", "completed" */ +export type SubtaskState = string; + +/** + * The result of listing the files on a Compute Node, or the files associated with + * a Task on a Compute Node. + */ +export interface NodeFileListResult { + /** The list of files. */ + value?: NodeFile[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** Information about a file or directory on a Compute Node. */ +export interface NodeFile { + /** The file path. */ + name?: string; + /** The URL of the file. */ + url?: string; + /** Whether the object represents a directory. */ + isDirectory?: boolean; + /** The file properties. */ + properties?: FileProperties; +} + +/** The properties of a file on a Compute Node. */ +export interface FileProperties { + /** The file creation time. The creation time is not returned for files on Linux Compute Nodes. */ + creationTime?: Date; + /** The time at which the file was last modified. */ + lastModified: Date; + /** The length of the file. */ + contentLength: number; + /** The content type of the file. */ + contentType?: string; + /** The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes. */ + fileMode?: string; +} + +/** Options for creating a user account for RDP or SSH access on an Azure Batch Compute Node. */ +export interface BatchNodeUserCreateOptions { + /** The user name of the Account. */ + name: string; + /** Whether the Account should be an administrator on the Compute Node. The default value is false. */ + isAdmin?: boolean; + /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ + expiryTime?: Date; + /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ + password?: string; + /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + sshPublicKey?: string; +} + +/** Options for updating a user account for RDP or SSH access on an Azure Batch Compute Node. 
*/ +export interface BatchNodeUserUpdateOptions { + /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ + password?: string; + /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ + expiryTime?: Date; + /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. */ + sshPublicKey?: string; +} + +/** A Compute Node in the Batch service. */ +export interface BatchNode { + /** The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. */ + id?: string; + /** The URL of the Compute Node. */ + url?: string; + /** The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. */ + state?: BatchNodeState; + /** Whether the Compute Node is available for Task scheduling. */ + schedulingState?: SchedulingState; + /** The time at which the Compute Node entered its current state. */ + stateTransitionTime?: Date; + /** The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. */ + lastBootTime?: Date; + /** The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. */ + allocationTime?: Date; + /** The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. */ + ipAddress?: string; + /** An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ + affinityId?: string; + /** The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + vmSize?: string; + /** The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. 
*/ + totalTasksRun?: number; + /** The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ + runningTasksCount?: number; + /** The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ + runningTaskSlotsCount?: number; + /** The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ + totalTasksSucceeded?: number; + /** A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. */ + recentTasks?: TaskInformation[]; + /** The Task specified to run on the Compute Node as it joins the Pool. */ + startTask?: StartTask; + /** Runtime information about the execution of the StartTask on the Compute Node. */ + startTaskInfo?: StartTaskInformation; + /** + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + certificateReferences?: CertificateReference[]; + /** The list of errors that are currently being encountered by the Compute Node. */ + errors?: BatchNodeError[]; + /** Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. */ + isDedicated?: boolean; + /** The endpoint configuration for the Compute Node. */ + endpointConfiguration?: BatchNodeEndpointConfiguration; + /** Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. */ + nodeAgentInfo?: NodeAgentInformation; + /** Info about the current state of the virtual machine. */ + virtualMachineInfo?: VirtualMachineInfo; +} + +/** BatchNodeState enums */ +/** "idle", "rebooting", "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", "starttaskfailed", "unknown", "leavingpool", "offline", "preempted" */ +export type BatchNodeState = string; +/** SchedulingState enums */ +/** "enabled", "disabled" */ +export type SchedulingState = string; + +/** Information about a Task running on a Compute Node. */ +export interface TaskInformation { + /** The URL of the Task. */ + taskUrl?: string; + /** The ID of the Job to which the Task belongs. */ + jobId?: string; + /** The ID of the Task. */ + taskId?: string; + /** The ID of the subtask if the Task is a multi-instance Task. */ + subtaskId?: number; + /** The current state of the Task. */ + taskState: TaskState; + /** Information about the execution of the Task. */ + executionInfo?: TaskExecutionInformation; +} + +/** Information about a StartTask running on a Compute Node. 
*/ +export interface StartTaskInformation { + /** The state of the StartTask on the Compute Node. */ + state: StartTaskState; + /** The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). */ + startTime: Date; + /** The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running. */ + endTime?: Date; + /** The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ + exitCode?: number; + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ + containerInfo?: TaskContainerExecutionInformation; + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ + failureInfo?: TaskFailureInformation; + /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ + retryCount: number; + /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ + lastRetryTime?: Date; + /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + result?: TaskExecutionResult; +} + +/** StartTaskState enums */ +/** "running", "completed" */ +export type StartTaskState = string; + +/** An error encountered by a Compute Node. */ +export interface BatchNodeError { + /** An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Compute Node error, intended to be suitable for display in a user interface. */ + message?: string; + /** The list of additional error details related to the Compute Node error. */ + errorDetails?: NameValuePair[]; +} + +/** The endpoint configuration for the Compute Node. */ +export interface BatchNodeEndpointConfiguration { + /** The list of inbound endpoints that are accessible on the Compute Node. */ + inboundEndpoints: InboundEndpoint[]; +} + +/** An inbound endpoint on a Compute Node. */ +export interface InboundEndpoint { + /** The name of the endpoint. */ + name: string; + /** The protocol of the endpoint. 
*/ + protocol: InboundEndpointProtocol; + /** The public IP address of the Compute Node. */ + publicIPAddress?: string; + /** The public fully qualified domain name for the Compute Node. */ + publicFQDN?: string; + /** The public port number of the endpoint. */ + frontendPort: number; + /** The backend port number of the endpoint. */ + backendPort: number; +} + +/** + * The Batch Compute Node agent is a program that runs on each Compute Node in the + * Pool and provides Batch capability on the Compute Node. + */ +export interface NodeAgentInformation { + /** The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. */ + version: string; + /** The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version. */ + lastUpdateTime: Date; +} + +/** Info about the current state of the virtual machine. */ +export interface VirtualMachineInfo { + /** The reference to the Azure Virtual Machine's Marketplace Image. */ + imageReference?: ImageReference; +} + +/** Options for rebooting an Azure Batch Compute Node. */ +export interface NodeRebootOptions { + /** When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. */ + nodeRebootOption?: BatchNodeRebootOption; +} + +/** BatchNodeRebootOption enums */ +/** "requeue", "terminate", "taskcompletion", "retaineddata" */ +export type BatchNodeRebootOption = string; + +/** Options for reimaging an Azure Batch Compute Node. */ +export interface NodeReimageOptions { + /** When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. */ + nodeReimageOption?: BatchNodeReimageOption; +} + +/** BatchNodeReimageOption enums */ +/** "requeue", "terminate", "taskcompletion", "retaineddata" */ +export type BatchNodeReimageOption = string; + +/** Options for disabling scheduling on an Azure Batch Compute Node. */ +export interface NodeDisableSchedulingOptions { + /** What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. */ + nodeDisableSchedulingOption?: DisableBatchNodeSchedulingOption; +} + +/** DisableBatchNodeSchedulingOption enums */ +/** "requeue", "terminate", "taskcompletion" */ +export type DisableBatchNodeSchedulingOption = string; + +/** The remote login settings for a Compute Node. */ +export interface BatchNodeRemoteLoginSettingsResult { + /** The IP address used for remote login to the Compute Node. */ + remoteLoginIPAddress: string; + /** The port used for remote login to the Compute Node. */ + remoteLoginPort: number; +} + +/** The Azure Batch service log files upload options for a Compute Node. */ +export interface UploadBatchServiceLogsOptions { + /** The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. */ + containerUrl: string; + /** The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. 
This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. */ + startTime: Date; + /** The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. */ + endTime?: Date; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ + identityReference?: BatchNodeIdentityReference; +} + +/** The result of uploading Batch service log files from a specific Compute Node. */ +export interface UploadBatchServiceLogsResult { + /** The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based poolId, nodeId and a unique identifier. */ + virtualDirectoryName: string; + /** The number of log files which will be uploaded. */ + numberOfFilesUploaded: number; +} + +/** The result of listing the Compute Nodes in a Pool. */ +export interface BatchNodeListResult { + /** The list of Compute Nodes. */ + value?: BatchNode[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** The configuration for virtual machine extension instance view. */ +export interface NodeVMExtension { + /** The provisioning state of the virtual machine extension. */ + provisioningState?: string; + /** The virtual machine extension. */ + vmExtension?: VMExtension; + /** The vm extension instance view. */ + instanceView?: VMExtensionInstanceView; +} + +/** The vm extension instance view. */ +export interface VMExtensionInstanceView { + /** The name of the vm extension instance view. */ + name?: string; + /** The resource status information. */ + statuses?: InstanceViewStatus[]; + /** The resource status information. */ + subStatuses?: InstanceViewStatus[]; +} + +/** The instance view status. */ +export interface InstanceViewStatus { + /** The status code. */ + code?: string; + /** The localized label for the status. */ + displayStatus?: string; + /** Level code. */ + level?: StatusLevelTypes; + /** The detailed status message. */ + message?: string; + /** The time of the status. */ + time?: string; +} + +/** Level code. */ +/** "Error", "Info", "Warning" */ +export type StatusLevelTypes = string; + +/** The result of listing the Compute Node extensions in a Node. */ +export interface NodeVMExtensionList { + /** The list of Compute Node extensions. */ + value?: NodeVMExtension[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/options.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/options.ts new file mode 100644 index 0000000000..c8c92825b9 --- /dev/null +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/options.ts @@ -0,0 +1,1618 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +import { OperationOptions } from "@azure-rest/core-client"; + +export interface ListApplicationsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; +} + +export interface GetApplicationOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; +} + +export interface ListPoolUsageMetricsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * The earliest time from which to include metrics. This must be at least two and + * a half hours before the current time. If not specified this defaults to the + * start time of the last aggregation interval currently available. + */ + starttime?: Date; + /** + * The latest time from which to include metrics. This must be at least two hours + * before the current time. If not specified this defaults to the end time of the + * last aggregation interval currently available. + */ + endtime?: Date; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + */ + $filter?: string; +} + +export interface CreatePoolOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface ListPoolsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface DeletePoolOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. 
The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface PoolExistsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface GetPoolOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface UpdatePoolOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. 
+ * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface DisablePoolAutoScaleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; +} + +export interface EnablePoolAutoScaleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface EvaluatePoolAutoScaleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface ResizePoolOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. 
The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface StopPoolResizeOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface ReplacePoolPropertiesOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface RemoveNodesOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface ListSupportedImagesOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. 
+ */ + $filter?: string; +} + +export interface ListPoolNodeCountsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + */ + $filter?: string; +} + +export interface DeleteJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface GetJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface UpdateJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. 
+ * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface ReplaceJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface DisableJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. 
+ */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface EnableJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface TerminateJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface CreateJobOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface ListJobsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. 
*/ + $expand?: string[]; +} + +export interface ListJobsFromScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface ListJobPreparationAndReleaseTaskStatusOptions + extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; +} + +export interface GetJobTaskCountsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; +} + +export interface CreateCertificateOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface ListCertificatesOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; +} + +export interface CancelCertificateDeletionOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; +} + +export interface DeleteCertificateOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; +} + +export interface GetCertificateOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** An OData $select clause. */ + $select?: string[]; +} + +export interface JobScheduleExistsOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. 
+ * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface DeleteJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface GetJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. 
*/ + $expand?: string[]; +} + +export interface UpdateJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface ReplaceJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface DisableJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. 
+ */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface EnableJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface TerminateJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface CreateJobScheduleOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface ListJobSchedulesOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. 
+ */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface CreateTaskOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface ListTasksOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface CreateTaskCollectionOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** Type of content */ + contentType?: string; +} + +export interface DeleteTaskOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface GetTaskOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. 
+ */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface ReplaceTaskOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; + /** Type of content */ + contentType?: string; +} + +export interface ListSubTasksOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** An OData $select clause. */ + $select?: string[]; +} + +export interface TerminateTaskOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + ifMatch?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + ifNoneMatch?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + ifModifiedSince?: Date; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + ifUnmodifiedSince?: Date; +} + +export interface ReactivateTaskOptions extends OperationOptions { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * An ETag value associated with the version of the resource known to the client. 
+
+export interface ReactivateTaskOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * An ETag value associated with the version of the resource known to the client.
+   * The operation will be performed only if the resource's current ETag on the
+   * service exactly matches the value specified by the client.
+   */
+  ifMatch?: string;
+  /**
+   * An ETag value associated with the version of the resource known to the client.
+   * The operation will be performed only if the resource's current ETag on the
+   * service does not match the value specified by the client.
+   */
+  ifNoneMatch?: string;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * been modified since the specified time.
+   */
+  ifModifiedSince?: Date;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * not been modified since the specified time.
+   */
+  ifUnmodifiedSince?: Date;
+}
+
+export interface DeleteTaskFileOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * Whether to delete children of a directory. If the filePath parameter represents
+   * a directory instead of a file, you can set recursive to true to delete the
+   * directory and all of the files and subdirectories in it. If recursive is false
+   * then the directory must be empty or deletion will fail.
+   */
+  recursive?: boolean;
+}
+
+export interface GetTaskFileOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * been modified since the specified time.
+   */
+  ifModifiedSince?: Date;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * not been modified since the specified time.
+   */
+  ifUnmodifiedSince?: Date;
+  /**
+   * The byte range to be retrieved. The default is to retrieve the entire file. The
+   * format is bytes=startRange-endRange.
+   */
+  ocpRange?: string;
+}
+
+export interface GetTaskFilePropertiesOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * been modified since the specified time.
+   */
+  ifModifiedSince?: Date;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * not been modified since the specified time.
+   */
+  ifUnmodifiedSince?: Date;
+}
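ocpRange follows the same grammar as an HTTP Range header, so a partial download is just a string. For example (the import path is an assumption):

```ts
import { GetTaskFileOptions } from "./models/options.js"; // path is an assumption

// Fetch only the first KiB of a log file, and only if it changed in the last day.
const fileOptions: GetTaskFileOptions = {
  ocpRange: "bytes=0-1023", // format is bytes=startRange-endRange
  ifModifiedSince: new Date(Date.now() - 24 * 60 * 60 * 1000),
};
```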
+
+export interface ListTaskFilesOptions extends OperationOptions {
+  /**
+   * The maximum number of items to return in the response. A maximum of 1000
+   * files can be returned.
+   */
+  maxresults?: number;
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * An OData $filter clause. For more information on constructing this filter, see
+   * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files.
+   */
+  $filter?: string;
+  /**
+   * Whether to list children of the Task directory. This parameter can be used in
+   * combination with the filter parameter to list specific types of files.
+   */
+  recursive?: boolean;
+}
+
+export interface CreateNodeUserOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** Type of content */
+  contentType?: string;
+}
+
+export interface DeleteNodeUserOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+}
+
+export interface ReplaceNodeUserOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** Type of content */
+  contentType?: string;
+}
+
+export interface GetNodeOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** An OData $select clause. */
+  $select?: string[];
+}
+
+export interface RebootNodeOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** Type of content */
+  contentType?: string;
+}
+
+export interface ReimageNodeOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** Type of content */
+  contentType?: string;
+}
+
+export interface DisableNodeSchedulingOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** Type of content */
+  contentType?: string;
+}
+
+export interface EnableNodeSchedulingOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+}
+
+export interface GetNodeRemoteLoginSettingsOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+}
+
+export interface GetNodeRemoteDesktopFileOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+}
+
+export interface UploadNodeLogsOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** Type of content */
+  contentType?: string;
+}
+
+export interface ListNodesOptions extends OperationOptions {
+  /**
+   * The maximum number of items to return in the response. A maximum of 1000
+   * Compute Nodes can be returned.
+   */
+  maxresults?: number;
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * An OData $filter clause. For more information on constructing this filter, see
+   * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool.
+   */
+  $filter?: string;
+  /** An OData $select clause. */
+  $select?: string[];
+}
+
+export interface GetNodeExtensionOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** An OData $select clause. */
+  $select?: string[];
+}
+
+export interface ListNodeExtensionsOptions extends OperationOptions {
+  /**
+   * The maximum number of items to return in the response. A maximum of 1000
+   * extensions can be returned.
+   */
+  maxresults?: number;
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /** An OData $select clause. */
+  $select?: string[];
+}
+
+export interface DeleteNodeFileOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * Whether to delete children of a directory. If the filePath parameter represents
+   * a directory instead of a file, you can set recursive to true to delete the
+   * directory and all of the files and subdirectories in it. If recursive is false
+   * then the directory must be empty or deletion will fail.
+   */
+  recursive?: boolean;
+}
+
+export interface GetNodeFileOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * been modified since the specified time.
+   */
+  ifModifiedSince?: Date;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * not been modified since the specified time.
+   */
+  ifUnmodifiedSince?: Date;
+  /**
+   * The byte range to be retrieved. The default is to retrieve the entire file. The
+   * format is bytes=startRange-endRange.
+   */
+  ocpRange?: string;
+}
+
+export interface GetNodeFilePropertiesOptions extends OperationOptions {
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * been modified since the specified time.
+   */
+  ifModifiedSince?: Date;
+  /**
+   * A timestamp indicating the last modified time of the resource known to the
+   * client. The operation will be performed only if the resource on the service has
+   * not been modified since the specified time.
+   */
+  ifUnmodifiedSince?: Date;
+}
+
+export interface ListNodeFilesOptions extends OperationOptions {
+  /**
+   * The maximum number of items to return in the response. A maximum of 1000
+   * files can be returned.
+   */
+  maxresults?: number;
+  /**
+   * The maximum time that the server can spend processing the request, in
+   * seconds. The default is 30 seconds.
+   */
+  timeOut?: number;
+  /**
+   * An OData $filter clause.
For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + */ + $filter?: string; + /** Whether to list children of a directory. */ + recursive?: boolean; +} diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/batchService.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/batchClient.ts similarity index 63% rename from packages/typespec-test/test/batch/generated/typespec-ts/src/batchService.ts rename to packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/batchClient.ts index 6e2c372de2..1bd6593fcc 100644 --- a/packages/typespec-test/test/batch/generated/typespec-ts/src/batchService.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/batchClient.ts @@ -2,13 +2,13 @@ // Licensed under the MIT license. import { getClient, ClientOptions } from "@azure-rest/core-client"; -import { logger } from "./logger"; +import { logger } from "../logger.js"; import { TokenCredential } from "@azure/core-auth"; -import { BatchServiceClient } from "./clientDefinitions"; +import { BatchContext } from "./clientDefinitions.js"; /** - * Initialize a new instance of `BatchServiceClient` - * @param endpoint - The parameter endpoint + * Initialize a new instance of `BatchContext` + * @param endpoint - Batch account endpoint (for example: https://batchaccount.eastus2.batch.azure.com). * @param credentials - uniquely identify client credential * @param options - the parameter for all optional parameters */ @@ -16,16 +16,9 @@ export default function createClient( endpoint: string, credentials: TokenCredential, options: ClientOptions = {} -): BatchServiceClient { +): BatchContext { const baseUrl = options.baseUrl ?? `${endpoint}`; - options.apiVersion = options.apiVersion ?? "2022-10-01.16.0"; - options = { - ...options, - credentials: { - scopes: ["user_impersonation"], - }, - }; - + options.apiVersion = options.apiVersion ?? "2023-05-01.17.0"; const userAgentInfo = `azsdk-js-batch-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -39,9 +32,19 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + telemetryOptions: { + clientRequestIdHeaderName: + options.telemetryOptions?.clientRequestIdHeaderName ?? + "client-request-id", + }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://batch.core.windows.net//.default", + ], + }, }; - const client = getClient(baseUrl, credentials, options) as BatchServiceClient; + const client = getClient(baseUrl, credentials, options) as BatchContext; return client; } diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/clientDefinitions.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/clientDefinitions.ts index 71a2e5c67e..0aeda36b42 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/clientDefinitions.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/clientDefinitions.ts @@ -2,248 +2,242 @@ // Licensed under the MIT license. 
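The renamed factory in the batchClient.ts hunk above now defaults the credential scope and the client-request-id header name, and both stay overridable through ClientOptions. A minimal sketch of constructing the context (the relative import path is an assumption):

```ts
import { DefaultAzureCredential } from "@azure/identity";
import createClient from "./rest/batchClient.js"; // path is an assumption

const client = createClient(
  "https://myaccount.eastus2.batch.azure.com", // Batch account endpoint
  new DefaultAzureCredential(),
  {
    // Optional: override the generated default scope shown in the hunk above.
    credentials: { scopes: ["https://batch.core.windows.net//.default"] },
  }
);
```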
import { - ApplicationsListApplicationsParameters, - ApplicationsGetParameters, - PoolListUsageMetricsParameters, - PoolGetAllPoolLifetimeStatisticsParameters, - PoolAddPoolParameters, - PoolListPoolsParameters, - PoolDeletePoolParameters, + ListApplicationsParameters, + GetApplicationParameters, + ListPoolUsageMetricsParameters, + CreatePoolParameters, + ListPoolsParameters, + DeletePoolParameters, PoolExistsParameters, - PoolGetPoolParameters, - PoolPatchPoolParameters, - PoolDisableAutoScaleParameters, - PoolEnableAutoScaleParameters, - PoolEvaluateAutoScaleParameters, - PoolResizeParameters, - PoolStopResizeParameters, - PoolUpdatePropertiesParameters, - PoolRemoveNodesParameters, - AccountListSupportedImagesParameters, - AccountListPoolNodeCountsParameters, - JobGetAllJobLifetimeStatisticsParameters, - JobDeleteJobParameters, - JobGetJobParameters, - JobPatchJobParameters, - JobUpdateJobParameters, - JobDisableJobParameters, - JobEnableJobParameters, - JobTerminateJobParameters, - JobAddJobParameters, - JobListJobsParameters, - JobListFromJobScheduleParameters, - JobListPreparationAndReleaseTaskStatusParameters, - JobGetTaskCountsParameters, - CertificatesAddCertificateParameters, - CertificatesListCertificatesParameters, - CertificatesCancelCertificateDeletionParameters, - CertificatesDeleteCertificateParameters, - CertificatesGetCertificateParameters, - FileDeleteFromTaskParameters, - FileGetFromTaskParameters, - FileGetPropertiesFromTaskParameters, - FileDeleteFromComputeNodeParameters, - FileGetFromComputeNodeParameters, - FileGetPropertiesFromComputeNodeParameters, - FileListFromTaskParameters, - FileListFromComputeNodeParameters, - JobScheduleJobScheduleExistsParameters, - JobScheduleDeleteJobScheduleParameters, - JobScheduleGetJobScheduleParameters, - JobSchedulePatchJobScheduleParameters, - JobScheduleUpdateJobScheduleParameters, - JobScheduleDisableJobScheduleParameters, - JobScheduleEnableJobScheduleParameters, - JobScheduleTerminateJobScheduleParameters, - JobScheduleAddJobScheduleParameters, - JobScheduleListJobSchedulesParameters, - TaskAddTaskParameters, - TaskListTasksParameters, - TaskAddTaskCollectionParameters, - TaskDeleteTaskCollectionParameters, - TaskGetTaskCollectionParameters, - TaskUpdateTaskCollectionParameters, - TaskListSubtasksParameters, - TaskTerminateTaskCollectionParameters, - TaskReactivateTaskCollectionParameters, - ComputeNodesAddUserParameters, - ComputeNodesDeleteUserParameters, - ComputeNodesUpdateUserParameters, - ComputeNodesGetComputeNodeParameters, - ComputeNodesRebootComputeNodeParameters, - ComputeNodesReimageComputeNodeParameters, - ComputeNodesDisableSchedulingParameters, - ComputeNodesEnableSchedulingParameters, - ComputeNodesGetRemoteLoginSettingsParameters, - ComputeNodesGetRemoteDesktopParameters, - ComputeNodesUploadBatchServiceLogsParameters, - ComputeNodesListParameters, - ComputeNodeExtensionsGetComputeNodeExtensionsParameters, - ComputeNodeExtensionsListComputeNodeExtensionsParameters, + GetPoolParameters, + UpdatePoolParameters, + DisablePoolAutoScaleParameters, + EnablePoolAutoScaleParameters, + EvaluatePoolAutoScaleParameters, + ResizePoolParameters, + StopPoolResizeParameters, + ReplacePoolPropertiesParameters, + RemoveNodesParameters, + ListSupportedImagesParameters, + ListPoolNodeCountsParameters, + DeleteJobParameters, + GetJobParameters, + UpdateJobParameters, + ReplaceJobParameters, + DisableJobParameters, + EnableJobParameters, + TerminateJobParameters, + CreateJobParameters, + ListJobsParameters, + 
ListJobsFromScheduleParameters, + ListJobPreparationAndReleaseTaskStatusParameters, + GetJobTaskCountsParameters, + CreateCertificateParameters, + ListCertificatesParameters, + CancelCertificateDeletionParameters, + DeleteCertificateParameters, + GetCertificateParameters, + JobScheduleExistsParameters, + DeleteJobScheduleParameters, + GetJobScheduleParameters, + UpdateJobScheduleParameters, + ReplaceJobScheduleParameters, + DisableJobScheduleParameters, + EnableJobScheduleParameters, + TerminateJobScheduleParameters, + CreateJobScheduleParameters, + ListJobSchedulesParameters, + CreateTaskParameters, + ListTasksParameters, + CreateTaskCollectionParameters, + DeleteTaskParameters, + GetTaskParameters, + ReplaceTaskParameters, + ListSubTasksParameters, + TerminateTaskParameters, + ReactivateTaskParameters, + DeleteTaskFileParameters, + GetTaskFileParameters, + GetTaskFilePropertiesParameters, + ListTaskFilesParameters, + CreateNodeUserParameters, + DeleteNodeUserParameters, + ReplaceNodeUserParameters, + GetNodeParameters, + RebootNodeParameters, + ReimageNodeParameters, + DisableNodeSchedulingParameters, + EnableNodeSchedulingParameters, + GetNodeRemoteLoginSettingsParameters, + GetNodeRemoteDesktopFileParameters, + UploadNodeLogsParameters, + ListNodesParameters, + GetNodeExtensionParameters, + ListNodeExtensionsParameters, + DeleteNodeFileParameters, + GetNodeFileParameters, + GetNodeFilePropertiesParameters, + ListNodeFilesParameters, } from "./parameters.js"; import { - ApplicationsListApplications200Response, - ApplicationsListApplicationsDefaultResponse, - ApplicationsGet200Response, - ApplicationsGetDefaultResponse, - PoolListUsageMetrics200Response, - PoolListUsageMetricsDefaultResponse, - PoolGetAllPoolLifetimeStatistics200Response, - PoolGetAllPoolLifetimeStatisticsDefaultResponse, - PoolAddPool201Response, - PoolAddPoolDefaultResponse, - PoolListPools200Response, - PoolListPoolsDefaultResponse, - PoolDeletePool202Response, - PoolDeletePoolDefaultResponse, + ListApplications200Response, + ListApplicationsDefaultResponse, + GetApplication200Response, + GetApplicationDefaultResponse, + ListPoolUsageMetrics200Response, + ListPoolUsageMetricsDefaultResponse, + CreatePool201Response, + CreatePoolDefaultResponse, + ListPools200Response, + ListPoolsDefaultResponse, + DeletePool202Response, + DeletePoolDefaultResponse, PoolExists200Response, PoolExists404Response, PoolExistsDefaultResponse, - PoolGetPool200Response, - PoolGetPoolDefaultResponse, - PoolPatchPool200Response, - PoolPatchPoolDefaultResponse, - PoolDisableAutoScale200Response, - PoolDisableAutoScaleDefaultResponse, - PoolEnableAutoScale200Response, - PoolEnableAutoScaleDefaultResponse, - PoolEvaluateAutoScale200Response, - PoolEvaluateAutoScaleDefaultResponse, - PoolResize202Response, - PoolResizeDefaultResponse, - PoolStopResize202Response, - PoolStopResizeDefaultResponse, - PoolUpdateProperties204Response, - PoolUpdatePropertiesDefaultResponse, - PoolRemoveNodes202Response, - PoolRemoveNodesDefaultResponse, - AccountListSupportedImages200Response, - AccountListSupportedImagesDefaultResponse, - AccountListPoolNodeCounts200Response, - AccountListPoolNodeCountsDefaultResponse, - JobGetAllJobLifetimeStatistics200Response, - JobGetAllJobLifetimeStatisticsDefaultResponse, - JobDeleteJob202Response, - JobDeleteJobDefaultResponse, - JobGetJob200Response, - JobGetJobDefaultResponse, - JobPatchJob200Response, - JobPatchJobDefaultResponse, - JobUpdateJob200Response, - JobUpdateJobDefaultResponse, - JobDisableJob202Response, - 
JobDisableJobDefaultResponse, - JobEnableJob202Response, - JobEnableJobDefaultResponse, - JobTerminateJob202Response, - JobTerminateJobDefaultResponse, - JobAddJob201Response, - JobAddJobDefaultResponse, - JobListJobs200Response, - JobListJobsDefaultResponse, - JobListFromJobSchedule200Response, - JobListFromJobScheduleDefaultResponse, - JobListPreparationAndReleaseTaskStatus200Response, - JobListPreparationAndReleaseTaskStatusDefaultResponse, - JobGetTaskCounts200Response, - JobGetTaskCountsDefaultResponse, - CertificatesAddCertificate201Response, - CertificatesAddCertificateDefaultResponse, - CertificatesListCertificates200Response, - CertificatesListCertificatesDefaultResponse, - CertificatesCancelCertificateDeletion204Response, - CertificatesCancelCertificateDeletionDefaultResponse, - CertificatesDeleteCertificate202Response, - CertificatesDeleteCertificateDefaultResponse, - CertificatesGetCertificate200Response, - CertificatesGetCertificateDefaultResponse, - FileDeleteFromTask200Response, - FileDeleteFromTaskDefaultResponse, - FileGetFromTask200Response, - FileGetFromTaskDefaultResponse, - FileGetPropertiesFromTask200Response, - FileGetPropertiesFromTaskDefaultResponse, - FileDeleteFromComputeNode200Response, - FileDeleteFromComputeNodeDefaultResponse, - FileGetFromComputeNode200Response, - FileGetFromComputeNodeDefaultResponse, - FileGetPropertiesFromComputeNode200Response, - FileGetPropertiesFromComputeNodeDefaultResponse, - FileListFromTask200Response, - FileListFromTaskDefaultResponse, - FileListFromComputeNode200Response, - FileListFromComputeNodeDefaultResponse, - JobScheduleJobScheduleExists200Response, - JobScheduleJobScheduleExists404Response, - JobScheduleJobScheduleExistsDefaultResponse, - JobScheduleDeleteJobSchedule202Response, - JobScheduleDeleteJobScheduleDefaultResponse, - JobScheduleGetJobSchedule200Response, - JobScheduleGetJobScheduleDefaultResponse, - JobSchedulePatchJobSchedule200Response, - JobSchedulePatchJobScheduleDefaultResponse, - JobScheduleUpdateJobSchedule200Response, - JobScheduleUpdateJobScheduleDefaultResponse, - JobScheduleDisableJobSchedule204Response, - JobScheduleDisableJobScheduleDefaultResponse, - JobScheduleEnableJobSchedule204Response, - JobScheduleEnableJobScheduleDefaultResponse, - JobScheduleTerminateJobSchedule202Response, - JobScheduleTerminateJobScheduleDefaultResponse, - JobScheduleAddJobSchedule201Response, - JobScheduleAddJobScheduleDefaultResponse, - JobScheduleListJobSchedules200Response, - JobScheduleListJobSchedulesDefaultResponse, - TaskAddTask201Response, - TaskAddTaskDefaultResponse, - TaskListTasks200Response, - TaskListTasksDefaultResponse, - TaskAddTaskCollection200Response, - TaskAddTaskCollectionDefaultResponse, - TaskDeleteTaskCollection200Response, - TaskDeleteTaskCollectionDefaultResponse, - TaskGetTaskCollection200Response, - TaskGetTaskCollectionDefaultResponse, - TaskUpdateTaskCollection200Response, - TaskUpdateTaskCollectionDefaultResponse, - TaskListSubtasks200Response, - TaskListSubtasksDefaultResponse, - TaskTerminateTaskCollection204Response, - TaskTerminateTaskCollectionDefaultResponse, - TaskReactivateTaskCollection204Response, - TaskReactivateTaskCollectionDefaultResponse, - ComputeNodesAddUser201Response, - ComputeNodesAddUserDefaultResponse, - ComputeNodesDeleteUser200Response, - ComputeNodesDeleteUserDefaultResponse, - ComputeNodesUpdateUser200Response, - ComputeNodesUpdateUserDefaultResponse, - ComputeNodesGetComputeNode200Response, - ComputeNodesGetComputeNodeDefaultResponse, - 
ComputeNodesRebootComputeNode202Response, - ComputeNodesRebootComputeNodeDefaultResponse, - ComputeNodesReimageComputeNode202Response, - ComputeNodesReimageComputeNodeDefaultResponse, - ComputeNodesDisableScheduling200Response, - ComputeNodesDisableSchedulingDefaultResponse, - ComputeNodesEnableScheduling200Response, - ComputeNodesEnableSchedulingDefaultResponse, - ComputeNodesGetRemoteLoginSettings200Response, - ComputeNodesGetRemoteLoginSettingsDefaultResponse, - ComputeNodesGetRemoteDesktop200Response, - ComputeNodesGetRemoteDesktopDefaultResponse, - ComputeNodesUploadBatchServiceLogs200Response, - ComputeNodesUploadBatchServiceLogsDefaultResponse, - ComputeNodesList200Response, - ComputeNodesListDefaultResponse, - ComputeNodeExtensionsGetComputeNodeExtensions200Response, - ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse, - ComputeNodeExtensionsListComputeNodeExtensions200Response, - ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse, + GetPool200Response, + GetPoolDefaultResponse, + UpdatePool200Response, + UpdatePoolDefaultResponse, + DisablePoolAutoScale200Response, + DisablePoolAutoScaleDefaultResponse, + EnablePoolAutoScale200Response, + EnablePoolAutoScaleDefaultResponse, + EvaluatePoolAutoScale200Response, + EvaluatePoolAutoScaleDefaultResponse, + ResizePool202Response, + ResizePoolDefaultResponse, + StopPoolResize202Response, + StopPoolResizeDefaultResponse, + ReplacePoolProperties204Response, + ReplacePoolPropertiesDefaultResponse, + RemoveNodes202Response, + RemoveNodesDefaultResponse, + ListSupportedImages200Response, + ListSupportedImagesDefaultResponse, + ListPoolNodeCounts200Response, + ListPoolNodeCountsDefaultResponse, + DeleteJob202Response, + DeleteJobDefaultResponse, + GetJob200Response, + GetJobDefaultResponse, + UpdateJob200Response, + UpdateJobDefaultResponse, + ReplaceJob200Response, + ReplaceJobDefaultResponse, + DisableJob202Response, + DisableJobDefaultResponse, + EnableJob202Response, + EnableJobDefaultResponse, + TerminateJob202Response, + TerminateJobDefaultResponse, + CreateJob201Response, + CreateJobDefaultResponse, + ListJobs200Response, + ListJobsDefaultResponse, + ListJobsFromSchedule200Response, + ListJobsFromScheduleDefaultResponse, + ListJobPreparationAndReleaseTaskStatus200Response, + ListJobPreparationAndReleaseTaskStatusDefaultResponse, + GetJobTaskCounts200Response, + GetJobTaskCountsDefaultResponse, + CreateCertificate201Response, + CreateCertificateDefaultResponse, + ListCertificates200Response, + ListCertificatesDefaultResponse, + CancelCertificateDeletion204Response, + CancelCertificateDeletionDefaultResponse, + DeleteCertificate202Response, + DeleteCertificateDefaultResponse, + GetCertificate200Response, + GetCertificateDefaultResponse, + JobScheduleExists200Response, + JobScheduleExists404Response, + JobScheduleExistsDefaultResponse, + DeleteJobSchedule202Response, + DeleteJobScheduleDefaultResponse, + GetJobSchedule200Response, + GetJobScheduleDefaultResponse, + UpdateJobSchedule200Response, + UpdateJobScheduleDefaultResponse, + ReplaceJobSchedule200Response, + ReplaceJobScheduleDefaultResponse, + DisableJobSchedule204Response, + DisableJobScheduleDefaultResponse, + EnableJobSchedule204Response, + EnableJobScheduleDefaultResponse, + TerminateJobSchedule202Response, + TerminateJobScheduleDefaultResponse, + CreateJobSchedule201Response, + CreateJobScheduleDefaultResponse, + ListJobSchedules200Response, + ListJobSchedulesDefaultResponse, + CreateTask201Response, + CreateTaskDefaultResponse, + ListTasks200Response, + 
+  ListTasksDefaultResponse,
+  CreateTaskCollection200Response,
+  CreateTaskCollectionDefaultResponse,
+  DeleteTask200Response,
+  DeleteTaskDefaultResponse,
+  GetTask200Response,
+  GetTaskDefaultResponse,
+  ReplaceTask200Response,
+  ReplaceTaskDefaultResponse,
+  ListSubTasks200Response,
+  ListSubTasksDefaultResponse,
+  TerminateTask204Response,
+  TerminateTaskDefaultResponse,
+  ReactivateTask204Response,
+  ReactivateTaskDefaultResponse,
+  DeleteTaskFile200Response,
+  DeleteTaskFileDefaultResponse,
+  GetTaskFile200Response,
+  GetTaskFileDefaultResponse,
+  GetTaskFileProperties200Response,
+  GetTaskFilePropertiesDefaultResponse,
+  ListTaskFiles200Response,
+  ListTaskFilesDefaultResponse,
+  CreateNodeUser201Response,
+  CreateNodeUserDefaultResponse,
+  DeleteNodeUser200Response,
+  DeleteNodeUserDefaultResponse,
+  ReplaceNodeUser200Response,
+  ReplaceNodeUserDefaultResponse,
+  GetNode200Response,
+  GetNodeDefaultResponse,
+  RebootNode202Response,
+  RebootNodeDefaultResponse,
+  ReimageNode202Response,
+  ReimageNodeDefaultResponse,
+  DisableNodeScheduling200Response,
+  DisableNodeSchedulingDefaultResponse,
+  EnableNodeScheduling200Response,
+  EnableNodeSchedulingDefaultResponse,
+  GetNodeRemoteLoginSettings200Response,
+  GetNodeRemoteLoginSettingsDefaultResponse,
+  GetNodeRemoteDesktopFile200Response,
+  GetNodeRemoteDesktopFileDefaultResponse,
+  UploadNodeLogs200Response,
+  UploadNodeLogsDefaultResponse,
+  ListNodes200Response,
+  ListNodesDefaultResponse,
+  GetNodeExtension200Response,
+  GetNodeExtensionDefaultResponse,
+  ListNodeExtensions200Response,
+  ListNodeExtensionsDefaultResponse,
+  DeleteNodeFile200Response,
+  DeleteNodeFileDefaultResponse,
+  GetNodeFile200Response,
+  GetNodeFileDefaultResponse,
+  GetNodeFileProperties200Response,
+  GetNodeFilePropertiesDefaultResponse,
+  ListNodeFiles200Response,
+  ListNodeFilesDefaultResponse,
 } from "./responses.js";
 import { Client, StreamableMethod } from "@azure-rest/core-client";
 
-export interface ApplicationsListApplications {
+export interface ListApplications {
   /**
    * This operation returns only Applications and versions that are available for
    * use on Compute Nodes; that is, that can be used in a Package reference. For
@@ -252,14 +246,13 @@ export interface ApplicationsListApplications {
    * API.
    */
   get(
-    options?: ApplicationsListApplicationsParameters
+    options?: ListApplicationsParameters
   ): StreamableMethod<
-    | ApplicationsListApplications200Response
-    | ApplicationsListApplicationsDefaultResponse
+    ListApplications200Response | ListApplicationsDefaultResponse
   >;
 }
 
-export interface ApplicationsGet {
+export interface GetApplication {
   /**
    * This operation returns only Applications and versions that are available for
    * use on Compute Nodes; that is, that can be used in a Package reference. For
    * versions of applications and versions that are not yet available to Compute
    * Nodes, use the Azure portal or the Azure Resource Manager
@@ -268,13 +261,13 @@ export interface ApplicationsGet {
    * API.
    */
   get(
-    options?: ApplicationsGetParameters
+    options?: GetApplicationParameters
   ): StreamableMethod<
-    ApplicationsGet200Response | ApplicationsGetDefaultResponse
+    GetApplication200Response | GetApplicationDefaultResponse
   >;
 }
 
-export interface PoolListUsageMetrics {
+export interface ListPoolUsageMetrics {
   /**
    * If you do not specify a $filter clause including a poolId, the response
    * includes all Pools that existed in the Account in the time range of the
    * returned aggregation intervals. If you do not specify a $filter clause
    * including a startTime or endTime these filters default to the start and end
    * times of the
@@ -284,43 +277,28 @@ export interface PoolListUsageMetrics {
    * last aggregation interval is returned.
 */
   get(
-    options?: PoolListUsageMetricsParameters
+    options?: ListPoolUsageMetricsParameters
   ): StreamableMethod<
-    PoolListUsageMetrics200Response | PoolListUsageMetricsDefaultResponse
+    ListPoolUsageMetrics200Response | ListPoolUsageMetricsDefaultResponse
   >;
 }
 
-export interface PoolGetAllPoolLifetimeStatistics {
-  /**
-   * Statistics are aggregated across all Pools that have ever existed in the
-   * Account, from Account creation to the last update time of the statistics. The
-   * statistics may not be immediately available. The Batch service performs
-   * periodic roll-up of statistics. The typical delay is about 30 minutes.
-   */
-  get(
-    options?: PoolGetAllPoolLifetimeStatisticsParameters
-  ): StreamableMethod<
-    | PoolGetAllPoolLifetimeStatistics200Response
-    | PoolGetAllPoolLifetimeStatisticsDefaultResponse
-  >;
-}
-
-export interface PoolAddPool {
+export interface CreatePool {
   /**
    * When naming Pools, avoid including sensitive information such as user names or
    * secret project names. This information may appear in telemetry logs accessible
    * to Microsoft Support engineers.
    */
   post(
-    options: PoolAddPoolParameters
-  ): StreamableMethod<PoolAddPool201Response | PoolAddPoolDefaultResponse>;
+    options: CreatePoolParameters
+  ): StreamableMethod<CreatePool201Response | CreatePoolDefaultResponse>;
   /** Lists all of the Pools in the specified Account. */
   get(
-    options?: PoolListPoolsParameters
-  ): StreamableMethod<PoolListPools200Response | PoolListPoolsDefaultResponse>;
+    options?: ListPoolsParameters
+  ): StreamableMethod<ListPools200Response | ListPoolsDefaultResponse>;
 }
 
-export interface PoolDeletePool {
+export interface DeletePool {
   /**
    * When you request that a Pool be deleted, the following actions occur: the Pool
    * state is set to deleting; any ongoing resize operation on the Pool are stopped;
@@ -336,10 +314,8 @@ export interface PoolDeletePool {
    * error code PoolBeingDeleted.
    */
   delete(
-    options?: PoolDeletePoolParameters
-  ): StreamableMethod<
-    PoolDeletePool202Response | PoolDeletePoolDefaultResponse
-  >;
+    options?: DeletePoolParameters
+  ): StreamableMethod<DeletePool202Response | DeletePoolDefaultResponse>;
   /** Gets basic properties of a Pool. */
   head(
     options?: PoolExistsParameters
@@ -348,28 +324,28 @@ export interface PoolDeletePool {
   >;
   /** Gets information about the specified Pool. */
   get(
-    options?: PoolGetPoolParameters
-  ): StreamableMethod<PoolGetPool200Response | PoolGetPoolDefaultResponse>;
+    options?: GetPoolParameters
+  ): StreamableMethod<GetPool200Response | GetPoolDefaultResponse>;
   /**
    * This only replaces the Pool properties specified in the request. For example,
    * if the Pool has a StartTask associated with it, and a request does not specify
    * a StartTask element, then the Pool keeps the existing StartTask.
    */
   patch(
-    options: PoolPatchPoolParameters
-  ): StreamableMethod<PoolPatchPool200Response | PoolPatchPoolDefaultResponse>;
+    options: UpdatePoolParameters
+  ): StreamableMethod<UpdatePool200Response | UpdatePoolDefaultResponse>;
 }
 
-export interface PoolDisableAutoScale {
+export interface DisablePoolAutoScale {
   /** Disables automatic scaling for a Pool. */
   post(
-    options?: PoolDisableAutoScaleParameters
+    options?: DisablePoolAutoScaleParameters
   ): StreamableMethod<
-    PoolDisableAutoScale200Response | PoolDisableAutoScaleDefaultResponse
+    DisablePoolAutoScale200Response | DisablePoolAutoScaleDefaultResponse
   >;
 }
 
-export interface PoolEnableAutoScale {
+export interface EnablePoolAutoScale {
   /**
    * You cannot enable automatic scaling on a Pool if a resize operation is in
    * progress on the Pool. If automatic scaling of the Pool is currently disabled,
@@ -379,26 +355,26 @@ export interface PoolEnableAutoScale {
    * more than once every 30 seconds.
 */
   post(
-    options: PoolEnableAutoScaleParameters
+    options: EnablePoolAutoScaleParameters
   ): StreamableMethod<
-    PoolEnableAutoScale200Response | PoolEnableAutoScaleDefaultResponse
+    EnablePoolAutoScale200Response | EnablePoolAutoScaleDefaultResponse
   >;
 }
 
-export interface PoolEvaluateAutoScale {
+export interface EvaluatePoolAutoScale {
   /**
    * This API is primarily for validating an autoscale formula, as it simply returns
    * the result without applying the formula to the Pool. The Pool must have auto
    * scaling enabled in order to evaluate a formula.
    */
   post(
-    options: PoolEvaluateAutoScaleParameters
+    options: EvaluatePoolAutoScaleParameters
   ): StreamableMethod<
-    PoolEvaluateAutoScale200Response | PoolEvaluateAutoScaleDefaultResponse
+    EvaluatePoolAutoScale200Response | EvaluatePoolAutoScaleDefaultResponse
   >;
 }
 
-export interface PoolResize {
+export interface ResizePool {
  /**
   * You can only resize a Pool when its allocation state is steady. If the Pool is
   * already resizing, the request fails with status code 409. When you resize a
@@ -409,11 +385,11 @@ export interface PoolResize {
   * Nodes, use the Pool remove Compute Nodes API instead.
   */
   post(
-    options: PoolResizeParameters
-  ): StreamableMethod<PoolResize202Response | PoolResizeDefaultResponse>;
+    options: ResizePoolParameters
+  ): StreamableMethod<ResizePool202Response | ResizePoolDefaultResponse>;
 }
 
-export interface PoolStopResize {
+export interface StopPoolResize {
   /**
    * This does not restore the Pool to its previous state before the resize
    * operation: it only stops any further changes being made, and the Pool maintains
@@ -424,78 +400,59 @@ export interface PoolStopResize {
    * be used to halt the initial sizing of the Pool when it is created.
    */
   post(
-    options?: PoolStopResizeParameters
+    options?: StopPoolResizeParameters
   ): StreamableMethod<
-    PoolStopResize202Response | PoolStopResizeDefaultResponse
+    StopPoolResize202Response | StopPoolResizeDefaultResponse
   >;
 }
 
-export interface PoolUpdateProperties {
+export interface ReplacePoolProperties {
   /**
    * This fully replaces all the updatable properties of the Pool. For example, if
    * the Pool has a StartTask associated with it and if StartTask is not specified
    * with this request, then the Batch service will remove the existing StartTask.
   */
   post(
-    options: PoolUpdatePropertiesParameters
+    options: ReplacePoolPropertiesParameters
   ): StreamableMethod<
-    PoolUpdateProperties204Response | PoolUpdatePropertiesDefaultResponse
+    ReplacePoolProperties204Response | ReplacePoolPropertiesDefaultResponse
   >;
 }
 
-export interface PoolRemoveNodes {
+export interface RemoveNodes {
   /**
    * This operation can only run when the allocation state of the Pool is steady.
    * When this operation runs, the allocation state changes from steady to resizing.
    * Each request may remove up to 100 nodes.
   */
   post(
-    options: PoolRemoveNodesParameters
-  ): StreamableMethod<
-    PoolRemoveNodes202Response | PoolRemoveNodesDefaultResponse
-  >;
+    options: RemoveNodesParameters
+  ): StreamableMethod<RemoveNodes202Response | RemoveNodesDefaultResponse>;
 }
 
-export interface AccountListSupportedImages {
+export interface ListSupportedImages {
   /** Lists all Virtual Machine Images supported by the Azure Batch service. */
   get(
-    options?: AccountListSupportedImagesParameters
+    options?: ListSupportedImagesParameters
   ): StreamableMethod<
-    | AccountListSupportedImages200Response
-    | AccountListSupportedImagesDefaultResponse
+    ListSupportedImages200Response | ListSupportedImagesDefaultResponse
   >;
 }
 
-export interface AccountListPoolNodeCounts {
+export interface ListPoolNodeCounts {
   /**
    * Gets the number of Compute Nodes in each state, grouped by Pool. Note that the
   * numbers returned may not always be up to date. If you need exact node counts,
   * use a list query.
   */
   get(
-    options?: AccountListPoolNodeCountsParameters
-  ): StreamableMethod<
-    | AccountListPoolNodeCounts200Response
-    | AccountListPoolNodeCountsDefaultResponse
-  >;
-}
-
-export interface JobGetAllJobLifetimeStatistics {
-  /**
-   * Statistics are aggregated across all Jobs that have ever existed in the
-   * Account, from Account creation to the last update time of the statistics. The
-   * statistics may not be immediately available. The Batch service performs
-   * periodic roll-up of statistics. The typical delay is about 30 minutes.
-   */
-  get(
-    options?: JobGetAllJobLifetimeStatisticsParameters
+    options?: ListPoolNodeCountsParameters
   ): StreamableMethod<
-    | JobGetAllJobLifetimeStatistics200Response
-    | JobGetAllJobLifetimeStatisticsDefaultResponse
+    ListPoolNodeCounts200Response | ListPoolNodeCountsDefaultResponse
   >;
 }
 
-export interface JobDeleteJob {
+export interface DeleteJob {
   /**
    * Deleting a Job also deletes all Tasks that are part of that Job, and all Job
    * statistics. This also overrides the retention period for Task data; that is, if
@@ -507,31 +464,31 @@ export interface JobDeleteJob {
    * that the Job is being deleted.
    */
   delete(
-    options?: JobDeleteJobParameters
-  ): StreamableMethod<JobDeleteJob202Response | JobDeleteJobDefaultResponse>;
+    options?: DeleteJobParameters
+  ): StreamableMethod<DeleteJob202Response | DeleteJobDefaultResponse>;
   /** Gets information about the specified Job. */
   get(
-    options?: JobGetJobParameters
-  ): StreamableMethod<JobGetJob200Response | JobGetJobDefaultResponse>;
+    options?: GetJobParameters
+  ): StreamableMethod<GetJob200Response | GetJobDefaultResponse>;
   /**
    * This replaces only the Job properties specified in the request. For example, if
    * the Job has constraints, and a request does not specify the constraints
    * element, then the Job keeps the existing constraints.
    */
   patch(
-    options: JobPatchJobParameters
-  ): StreamableMethod<JobPatchJob200Response | JobPatchJobDefaultResponse>;
+    options: UpdateJobParameters
+  ): StreamableMethod<UpdateJob200Response | UpdateJobDefaultResponse>;
   /**
    * This fully replaces all the updatable properties of the Job. For example, if
    * the Job has constraints associated with it and if constraints is not specified
    * with this request, then the Batch service will remove the existing constraints.
    */
   put(
-    options: JobUpdateJobParameters
-  ): StreamableMethod<JobUpdateJob200Response | JobUpdateJobDefaultResponse>;
+    options: ReplaceJobParameters
+  ): StreamableMethod<ReplaceJob200Response | ReplaceJobDefaultResponse>;
 }
 
-export interface JobDisableJob {
+export interface DisableJob {
   /**
    * The Batch Service immediately moves the Job to the disabling state. Batch then
    * uses the disableTasks parameter to determine what to do with the currently
@@ -543,11 +500,11 @@ export interface JobDisableJob {
    * the request fails with status code 409.
    */
   post(
-    options: JobDisableJobParameters
-  ): StreamableMethod<JobDisableJob202Response | JobDisableJobDefaultResponse>;
+    options: DisableJobParameters
+  ): StreamableMethod<DisableJob202Response | DisableJobDefaultResponse>;
 }
 
-export interface JobEnableJob {
+export interface EnableJob {
   /**
    * When you call this API, the Batch service sets a disabled Job to the enabling
    * state. After this operation is completed, the Job moves to the active
@@ -557,11 +514,11 @@ export interface JobEnableJob {
    * than 180 days ago, those Tasks will not run.
   */
   post(
-    options?: JobEnableJobParameters
-  ): StreamableMethod<JobEnableJob202Response | JobEnableJobDefaultResponse>;
+    options?: EnableJobParameters
+  ): StreamableMethod<EnableJob202Response | EnableJobDefaultResponse>;
 }
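At a call site, these operation-group interfaces are what `client.path(...)` returns on the BatchContext created earlier. A sketch of invoking the renamed GetJob operation; the route template "/jobs/{jobId}" is an assumption, since the Routes mapping is not shown in this diff:

```ts
// `client` is the BatchContext returned by createClient in the earlier hunk.
// "/jobs/{jobId}" is an assumed route template for illustration only.
const response = await client.path("/jobs/{jobId}", "my-job").get();
if (response.status !== "200") {
  throw new Error(`GetJob failed with status ${response.status}`);
}
```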
 
-export interface JobTerminateJob {
+export interface TerminateJob {
   /**
    * When a Terminate Job request is received, the Batch service sets the Job to the
    * terminating state. The Batch service then terminates any running Tasks
    * associated with the Job and runs any required Job release Tasks. Then the Job
    * moves into the completed state. If there are any Tasks in the Job in the active
    * state, they will remain in the active state. Once a Job is terminated, new
@@ -571,13 +528,11 @@ export interface JobTerminateJob {
    * Tasks cannot be added and any remaining active Tasks will not be scheduled.
    */
   post(
-    options?: JobTerminateJobParameters
-  ): StreamableMethod<
-    JobTerminateJob202Response | JobTerminateJobDefaultResponse
-  >;
+    options: TerminateJobParameters
+  ): StreamableMethod<TerminateJob202Response | TerminateJobDefaultResponse>;
 }
 
-export interface JobAddJob {
+export interface CreateJob {
   /**
    * The Batch service supports two ways to control the work done as part of a Job.
    * In the first approach, the user specifies a Job Manager Task. The Batch service
@@ -590,24 +545,24 @@ export interface JobAddJob {
    * engineers.
    */
   post(
-    options: JobAddJobParameters
-  ): StreamableMethod<JobAddJob201Response | JobAddJobDefaultResponse>;
+    options: CreateJobParameters
+  ): StreamableMethod<CreateJob201Response | CreateJobDefaultResponse>;
   /** Lists all of the Jobs in the specified Account. */
   get(
-    options?: JobListJobsParameters
-  ): StreamableMethod<JobListJobs200Response | JobListJobsDefaultResponse>;
+    options?: ListJobsParameters
+  ): StreamableMethod<ListJobs200Response | ListJobsDefaultResponse>;
 }
 
-export interface JobListFromJobSchedule {
+export interface ListJobsFromSchedule {
   /** Lists the Jobs that have been created under the specified Job Schedule. */
   get(
-    options?: JobListFromJobScheduleParameters
+    options?: ListJobsFromScheduleParameters
   ): StreamableMethod<
-    JobListFromJobSchedule200Response | JobListFromJobScheduleDefaultResponse
+    ListJobsFromSchedule200Response | ListJobsFromScheduleDefaultResponse
   >;
 }
 
-export interface JobListPreparationAndReleaseTaskStatus {
+export interface ListJobPreparationAndReleaseTaskStatus {
   /**
    * This API returns the Job Preparation and Job Release Task status on all Compute
    * Nodes that have run the Job Preparation or Job Release Task. This includes
@@ -617,14 +572,14 @@ export interface JobListPreparationAndReleaseTaskStatus {
    * JobPreparationTaskNotSpecified.
   */
   get(
-    options?: JobListPreparationAndReleaseTaskStatusParameters
+    options?: ListJobPreparationAndReleaseTaskStatusParameters
   ): StreamableMethod<
-    | JobListPreparationAndReleaseTaskStatus200Response
-    | JobListPreparationAndReleaseTaskStatusDefaultResponse
+    | ListJobPreparationAndReleaseTaskStatus200Response
+    | ListJobPreparationAndReleaseTaskStatusDefaultResponse
   >;
 }
 
-export interface JobGetTaskCounts {
+export interface GetJobTaskCounts {
   /**
    * Task counts provide a count of the Tasks by active, running or completed Task
    * state, and a count of Tasks which succeeded or failed. Tasks in the preparing
@@ -632,30 +587,28 @@ export interface JobGetTaskCounts {
    * up to date. If you need exact task counts, use a list query.
   */
   get(
-    options?: JobGetTaskCountsParameters
+    options?: GetJobTaskCountsParameters
   ): StreamableMethod<
-    JobGetTaskCounts200Response | JobGetTaskCountsDefaultResponse
+    GetJobTaskCounts200Response | GetJobTaskCountsDefaultResponse
   >;
 }
 
-export interface CertificatesAddCertificate {
-  /** Adds a Certificate to the specified Account. */
+export interface CreateCertificate {
+  /** Creates a Certificate in the specified Account. */
   post(
-    options: CertificatesAddCertificateParameters
+    options: CreateCertificateParameters
   ): StreamableMethod<
-    | CertificatesAddCertificate201Response
-    | CertificatesAddCertificateDefaultResponse
+    CreateCertificate201Response | CreateCertificateDefaultResponse
   >;
   /** Lists all of the Certificates that have been added to the specified Account.
*/ get( - options?: CertificatesListCertificatesParameters + options?: ListCertificatesParameters ): StreamableMethod< - | CertificatesListCertificates200Response - | CertificatesListCertificatesDefaultResponse + ListCertificates200Response | ListCertificatesDefaultResponse >; } -export interface CertificatesCancelCertificateDeletion { +export interface CancelCertificateDeletion { /** * If you try to delete a Certificate that is being used by a Pool or Compute * Node, the status of the Certificate changes to deleteFailed. If you decide that @@ -666,14 +619,14 @@ export interface CertificatesCancelCertificateDeletion { * then you can try again to delete the Certificate. */ post( - options?: CertificatesCancelCertificateDeletionParameters + options?: CancelCertificateDeletionParameters ): StreamableMethod< - | CertificatesCancelCertificateDeletion204Response - | CertificatesCancelCertificateDeletionDefaultResponse + | CancelCertificateDeletion204Response + | CancelCertificateDeletionDefaultResponse >; } -export interface CertificatesDeleteCertificate { +export interface DeleteCertificate { /** * You cannot delete a Certificate if a resource (Pool or Compute Node) is using * it. Before you can delete a Certificate, you must therefore make sure that the @@ -686,91 +639,26 @@ export interface CertificatesDeleteCertificate { * active if you decide that you want to continue using the Certificate. */ delete( - options?: CertificatesDeleteCertificateParameters + options?: DeleteCertificateParameters ): StreamableMethod< - | CertificatesDeleteCertificate202Response - | CertificatesDeleteCertificateDefaultResponse + DeleteCertificate202Response | DeleteCertificateDefaultResponse >; /** Gets information about the specified Certificate. */ get( - options?: CertificatesGetCertificateParameters + options?: GetCertificateParameters ): StreamableMethod< - | CertificatesGetCertificate200Response - | CertificatesGetCertificateDefaultResponse + GetCertificate200Response | GetCertificateDefaultResponse >; } -export interface FileDeleteFromTask { - /** Deletes the specified Task file from the Compute Node where the Task ran. */ - delete( - options?: FileDeleteFromTaskParameters - ): StreamableMethod< - FileDeleteFromTask200Response | FileDeleteFromTaskDefaultResponse - >; - /** Returns the content of the specified Task file. */ - get( - options?: FileGetFromTaskParameters - ): StreamableMethod< - FileGetFromTask200Response | FileGetFromTaskDefaultResponse - >; - /** Gets the properties of the specified Task file. */ - head( - options?: FileGetPropertiesFromTaskParameters - ): StreamableMethod< - | FileGetPropertiesFromTask200Response - | FileGetPropertiesFromTaskDefaultResponse - >; -} - -export interface FileDeleteFromComputeNode { - /** Deletes the specified file from the Compute Node. */ - delete( - options?: FileDeleteFromComputeNodeParameters - ): StreamableMethod< - | FileDeleteFromComputeNode200Response - | FileDeleteFromComputeNodeDefaultResponse - >; - /** Returns the content of the specified Compute Node file. */ - get( - options?: FileGetFromComputeNodeParameters - ): StreamableMethod< - FileGetFromComputeNode200Response | FileGetFromComputeNodeDefaultResponse - >; - /** Gets the properties of the specified Compute Node file. 
 */
-  head(
-    options?: FileGetPropertiesFromComputeNodeParameters
-  ): StreamableMethod<
-    | FileGetPropertiesFromComputeNode200Response
-    | FileGetPropertiesFromComputeNodeDefaultResponse
-  >;
-}
-
-export interface FileListFromTask {
-  /** Lists the files in a Task's directory on its Compute Node. */
-  get(
-    options?: FileListFromTaskParameters
-  ): StreamableMethod<
-    FileListFromTask200Response | FileListFromTaskDefaultResponse
-  >;
-}
-
-export interface FileListFromComputeNode {
-  /** Lists all of the files in Task directories on the specified Compute Node. */
-  get(
-    options?: FileListFromComputeNodeParameters
-  ): StreamableMethod<
-    FileListFromComputeNode200Response | FileListFromComputeNodeDefaultResponse
-  >;
-}
-
-export interface JobScheduleJobScheduleExists {
+export interface JobScheduleExists {
   /** Checks whether the specified Job Schedule exists. */
   head(
-    options?: JobScheduleJobScheduleExistsParameters
+    options?: JobScheduleExistsParameters
   ): StreamableMethod<
-    | JobScheduleJobScheduleExists200Response
-    | JobScheduleJobScheduleExists404Response
-    | JobScheduleJobScheduleExistsDefaultResponse
+    | JobScheduleExists200Response
+    | JobScheduleExists404Response
+    | JobScheduleExistsDefaultResponse
   >;
   /**
    * When you delete a Job Schedule, this also deletes all Jobs and Tasks under that
@@ -780,17 +668,15 @@ export interface JobScheduleJobScheduleExists {
    * though they are still counted towards Account lifetime statistics.
   */
   delete(
-    options?: JobScheduleDeleteJobScheduleParameters
+    options?: DeleteJobScheduleParameters
   ): StreamableMethod<
-    | JobScheduleDeleteJobSchedule202Response
-    | JobScheduleDeleteJobScheduleDefaultResponse
+    DeleteJobSchedule202Response | DeleteJobScheduleDefaultResponse
   >;
   /** Gets information about the specified Job Schedule. */
   get(
-    options?: JobScheduleGetJobScheduleParameters
+    options?: GetJobScheduleParameters
   ): StreamableMethod<
-    | JobScheduleGetJobSchedule200Response
-    | JobScheduleGetJobScheduleDefaultResponse
+    GetJobSchedule200Response | GetJobScheduleDefaultResponse
   >;
   /**
    * This replaces only the Job Schedule properties specified in the request. For
@@ -800,10 +686,9 @@ export interface JobScheduleJobScheduleExists {
    * running Jobs are unaffected.
   */
   patch(
-    options: JobSchedulePatchJobScheduleParameters
+    options: UpdateJobScheduleParameters
   ): StreamableMethod<
-    | JobSchedulePatchJobSchedule200Response
-    | JobSchedulePatchJobScheduleDefaultResponse
+    UpdateJobSchedule200Response | UpdateJobScheduleDefaultResponse
   >;
   /**
    * This fully replaces all the updatable properties of the Job Schedule. For
@@ -813,80 +698,74 @@ export interface JobScheduleJobScheduleExists {
    * running Jobs are unaffected.
   */
   put(
-    options: JobScheduleUpdateJobScheduleParameters
+    options: ReplaceJobScheduleParameters
   ): StreamableMethod<
-    | JobScheduleUpdateJobSchedule200Response
-    | JobScheduleUpdateJobScheduleDefaultResponse
+    ReplaceJobSchedule200Response | ReplaceJobScheduleDefaultResponse
  >;
 }
 
-export interface JobScheduleDisableJobSchedule {
+export interface DisableJobSchedule {
   /** No new Jobs will be created until the Job Schedule is enabled again. */
   post(
-    options?: JobScheduleDisableJobScheduleParameters
+    options?: DisableJobScheduleParameters
   ): StreamableMethod<
-    | JobScheduleDisableJobSchedule204Response
-    | JobScheduleDisableJobScheduleDefaultResponse
+    DisableJobSchedule204Response | DisableJobScheduleDefaultResponse
  >;
 }
 
-export interface JobScheduleEnableJobSchedule {
+export interface EnableJobSchedule {
   /** Enables a Job Schedule.
 */
   post(
-    options?: JobScheduleEnableJobScheduleParameters
+    options?: EnableJobScheduleParameters
   ): StreamableMethod<
-    | JobScheduleEnableJobSchedule204Response
-    | JobScheduleEnableJobScheduleDefaultResponse
+    EnableJobSchedule204Response | EnableJobScheduleDefaultResponse
   >;
 }

-export interface JobScheduleTerminateJobSchedule {
+export interface TerminateJobSchedule {
   /** Terminates a Job Schedule. */
   post(
-    options?: JobScheduleTerminateJobScheduleParameters
+    options?: TerminateJobScheduleParameters
   ): StreamableMethod<
-    | JobScheduleTerminateJobSchedule202Response
-    | JobScheduleTerminateJobScheduleDefaultResponse
+    TerminateJobSchedule202Response | TerminateJobScheduleDefaultResponse
   >;
 }

-export interface JobScheduleAddJobSchedule {
-  /** Adds a Job Schedule to the specified Account. */
+export interface CreateJobSchedule {
+  /** Creates a Job Schedule in the specified Account. */
   post(
-    options: JobScheduleAddJobScheduleParameters
+    options: CreateJobScheduleParameters
   ): StreamableMethod<
-    | JobScheduleAddJobSchedule201Response
-    | JobScheduleAddJobScheduleDefaultResponse
+    CreateJobSchedule201Response | CreateJobScheduleDefaultResponse
   >;
   /** Lists all of the Job Schedules in the specified Account. */
   get(
-    options?: JobScheduleListJobSchedulesParameters
+    options?: ListJobSchedulesParameters
   ): StreamableMethod<
-    | JobScheduleListJobSchedules200Response
-    | JobScheduleListJobSchedulesDefaultResponse
+    ListJobSchedules200Response | ListJobSchedulesDefaultResponse
   >;
 }

-export interface TaskAddTask {
+export interface CreateTask {
   /**
    * The maximum lifetime of a Task from addition to completion is 180 days. If a
    * Task has not completed within 180 days of being added it will be terminated by
    * the Batch service and left in whatever state it was in at that time.
    */
   post(
-    options: TaskAddTaskParameters
-  ): StreamableMethod<TaskAddTask201Response | TaskAddTaskDefaultResponse>;
+    options: CreateTaskParameters
+  ): StreamableMethod<CreateTask201Response | CreateTaskDefaultResponse>;
   /**
    * For multi-instance Tasks, information such as affinityId, executionInfo and
    * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve
    * information about subtasks.
    */
   get(
-    options?: TaskListTasksParameters
-  ): StreamableMethod<TaskListTasks200Response | TaskListTasksDefaultResponse>;
+    options?: ListTasksParameters
+  ): StreamableMethod<ListTasks200Response | ListTasksDefaultResponse>;
 }

-export interface TaskAddTaskCollection {
+export interface CreateTaskCollection {
   /**
    * Note that each Task must have a unique ID. The Batch service may not return the
    * results for each Task in the same order the Tasks were submitted in this
@@ -904,13 +783,13 @@ export interface TaskAddTaskCollection {
    * service and left in whatever state it was in at that time.
    */
   post(
-    options: TaskAddTaskCollectionParameters
+    options: CreateTaskCollectionParameters
   ): StreamableMethod<
-    TaskAddTaskCollection200Response | TaskAddTaskCollectionDefaultResponse
+    CreateTaskCollection200Response | CreateTaskCollectionDefaultResponse
   >;
 }
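
// NOTE: a minimal usage sketch, illustrative only and not part of the generated
// file. It assumes a `client` built by this package's createClient factory and an
// existing Job; `jobId` and the task body are placeholders:
//
//   const result = await client
//     .path("/jobs/{jobId}/tasks", jobId)
//     .post({ body: { id: "task-1", commandLine: "cmd /c echo hello" } });
//
// The path string resolves to the renamed CreateTask group above, so the rename
// changes only the declaration names, not the calling pattern.
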
-export interface TaskDeleteTaskCollection {
+export interface DeleteTask {
   /**
    * When a Task is deleted, all of the files in its directory on the Compute Node
    * where it ran are also deleted (regardless of the retention time). For
@@ -919,54 +798,41 @@ export interface TaskDeleteTaskCollection {
    * background.
    */
   delete(
-    options?: TaskDeleteTaskCollectionParameters
-  ): StreamableMethod<
-    | TaskDeleteTaskCollection200Response
-    | TaskDeleteTaskCollectionDefaultResponse
-  >;
+    options?: DeleteTaskParameters
+  ): StreamableMethod<DeleteTask200Response | DeleteTaskDefaultResponse>;
   /**
    * For multi-instance Tasks, information such as affinityId, executionInfo and
    * nodeInfo refer to the primary Task. Use the list subtasks API to retrieve
    * information about subtasks.
    */
   get(
-    options?: TaskGetTaskCollectionParameters
-  ): StreamableMethod<
-    TaskGetTaskCollection200Response | TaskGetTaskCollectionDefaultResponse
-  >;
+    options?: GetTaskParameters
+  ): StreamableMethod<GetTask200Response | GetTaskDefaultResponse>;
   /** Updates the properties of the specified Task. */
   put(
-    options: TaskUpdateTaskCollectionParameters
-  ): StreamableMethod<
-    | TaskUpdateTaskCollection200Response
-    | TaskUpdateTaskCollectionDefaultResponse
-  >;
+    options: ReplaceTaskParameters
+  ): StreamableMethod<ReplaceTask200Response | ReplaceTaskDefaultResponse>;
 }

-export interface TaskListSubtasks {
+export interface ListSubTasks {
   /** If the Task is not a multi-instance Task then this returns an empty collection. */
   get(
-    options?: TaskListSubtasksParameters
-  ): StreamableMethod<
-    TaskListSubtasks200Response | TaskListSubtasksDefaultResponse
-  >;
+    options?: ListSubTasksParameters
+  ): StreamableMethod<ListSubTasks200Response | ListSubTasksDefaultResponse>;
 }

-export interface TaskTerminateTaskCollection {
+export interface TerminateTask {
   /**
    * When the Task has been terminated, it moves to the completed state. For
    * multi-instance Tasks, the terminate Task operation applies synchronously to the
    * primary task; subtasks are then terminated asynchronously in the background.
    */
   post(
-    options?: TaskTerminateTaskCollectionParameters
-  ): StreamableMethod<
-    | TaskTerminateTaskCollection204Response
-    | TaskTerminateTaskCollectionDefaultResponse
-  >;
+    options?: TerminateTaskParameters
+  ): StreamableMethod<TerminateTask204Response | TerminateTaskDefaultResponse>;
 }

-export interface TaskReactivateTaskCollection {
+export interface ReactivateTask {
   /**
    * Reactivation makes a Task eligible to be retried again up to its maximum retry
    * count. The Task's state is changed to active. As the Task is no longer in the
@@ -977,34 +843,59 @@ export interface TaskReactivateTaskCollection {
    * will fail if the Job has completed (or is terminating or deleting).
    */
   post(
-    options?: TaskReactivateTaskCollectionParameters
+    options?: ReactivateTaskParameters
+  ): StreamableMethod<
+    ReactivateTask204Response | ReactivateTaskDefaultResponse
+  >;
+}
+
+export interface DeleteTaskFile {
+  /** Deletes the specified Task file from the Compute Node where the Task ran. */
+  delete(
+    options?: DeleteTaskFileParameters
+  ): StreamableMethod<
+    DeleteTaskFile200Response | DeleteTaskFileDefaultResponse
+  >;
+  /** Returns the content of the specified Task file. */
+  get(
+    options?: GetTaskFileParameters
+  ): StreamableMethod<GetTaskFile200Response | GetTaskFileDefaultResponse>;
+  /** Gets the properties of the specified Task file. */
+  head(
+    options?: GetTaskFilePropertiesParameters
   ): StreamableMethod<
-    | TaskReactivateTaskCollection204Response
-    | TaskReactivateTaskCollectionDefaultResponse
+    GetTaskFileProperties200Response | GetTaskFilePropertiesDefaultResponse
   >;
 }

-export interface ComputeNodesAddUser {
+export interface ListTaskFiles {
+  /** Lists the files in a Task's directory on its Compute Node. */
+  get(
+    options?: ListTaskFilesParameters
+  ): StreamableMethod<ListTaskFiles200Response | ListTaskFilesDefaultResponse>;
+}
+
+export interface CreateNodeUser {
   /**
    * You can add a user Account to a Compute Node only when it is in the idle or
    * running state.
    */
   post(
-    options: ComputeNodesAddUserParameters
+    options: CreateNodeUserParameters
   ): StreamableMethod<
-    ComputeNodesAddUser201Response | ComputeNodesAddUserDefaultResponse
+    CreateNodeUser201Response | CreateNodeUserDefaultResponse
   >;
 }
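
// NOTE: an illustrative sketch of the Task-file operations that now live beside
// the Task group (DeleteTaskFile/GetTaskFile/ListTaskFiles above); `client`,
// `jobId`, and `taskId` are assumed, and "stdout.txt" is a placeholder path:
//
//   const download = await client
//     .path(
//       "/jobs/{jobId}/tasks/{taskId}/files/{filePath}",
//       jobId,
//       taskId,
//       "stdout.txt"
//     )
//     .get()
//     .asNodeStream();
//   // download.body is a Node.js readable stream when the request succeeds
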
-export interface ComputeNodesDeleteUser {
+export interface DeleteNodeUser {
   /**
    * You can delete a user Account from a Compute Node only when it is in the idle or
    * running state.
   */
   delete(
-    options?: ComputeNodesDeleteUserParameters
+    options?: DeleteNodeUserParameters
   ): StreamableMethod<
-    ComputeNodesDeleteUser200Response | ComputeNodesDeleteUserDefaultResponse
+    DeleteNodeUser200Response | DeleteNodeUserDefaultResponse
   >;
   /**
    * This operation replaces all of the updatable properties of the Account. For
@@ -1013,73 +904,62 @@ export interface ComputeNodesDeleteUser {
    * Account on a Compute Node only when it is in the idle or running state.
    */
   put(
-    options: ComputeNodesUpdateUserParameters
+    options: ReplaceNodeUserParameters
   ): StreamableMethod<
-    ComputeNodesUpdateUser200Response | ComputeNodesUpdateUserDefaultResponse
+    ReplaceNodeUser200Response | ReplaceNodeUserDefaultResponse
   >;
 }

-export interface ComputeNodesGetComputeNode {
+export interface GetNode {
   /** Gets information about the specified Compute Node. */
   get(
-    options?: ComputeNodesGetComputeNodeParameters
-  ): StreamableMethod<
-    | ComputeNodesGetComputeNode200Response
-    | ComputeNodesGetComputeNodeDefaultResponse
-  >;
+    options?: GetNodeParameters
+  ): StreamableMethod<GetNode200Response | GetNodeDefaultResponse>;
 }

-export interface ComputeNodesRebootComputeNode {
+export interface RebootNode {
   /** You can restart a Compute Node only if it is in an idle or running state. */
   post(
-    options?: ComputeNodesRebootComputeNodeParameters
-  ): StreamableMethod<
-    | ComputeNodesRebootComputeNode202Response
-    | ComputeNodesRebootComputeNodeDefaultResponse
-  >;
+    options: RebootNodeParameters
+  ): StreamableMethod<RebootNode202Response | RebootNodeDefaultResponse>;
 }

-export interface ComputeNodesReimageComputeNode {
+export interface ReimageNode {
   /**
    * You can reinstall the operating system on a Compute Node only if it is in an
    * idle or running state. This API can be invoked only on Pools created with the
    * cloud service configuration property.
    */
   post(
-    options?: ComputeNodesReimageComputeNodeParameters
-  ): StreamableMethod<
-    | ComputeNodesReimageComputeNode202Response
-    | ComputeNodesReimageComputeNodeDefaultResponse
-  >;
+    options: ReimageNodeParameters
+  ): StreamableMethod<ReimageNode202Response | ReimageNodeDefaultResponse>;
 }

-export interface ComputeNodesDisableScheduling {
+export interface DisableNodeScheduling {
   /**
    * You can disable Task scheduling on a Compute Node only if its current
    * scheduling state is enabled.
    */
   post(
-    options?: ComputeNodesDisableSchedulingParameters
+    options: DisableNodeSchedulingParameters
   ): StreamableMethod<
-    | ComputeNodesDisableScheduling200Response
-    | ComputeNodesDisableSchedulingDefaultResponse
+    DisableNodeScheduling200Response | DisableNodeSchedulingDefaultResponse
   >;
 }

-export interface ComputeNodesEnableScheduling {
+export interface EnableNodeScheduling {
   /**
    * You can enable Task scheduling on a Compute Node only if its current scheduling
    * state is disabled.
    */
   post(
-    options?: ComputeNodesEnableSchedulingParameters
+    options?: EnableNodeSchedulingParameters
   ): StreamableMethod<
-    | ComputeNodesEnableScheduling200Response
-    | ComputeNodesEnableSchedulingDefaultResponse
+    EnableNodeScheduling200Response | EnableNodeSchedulingDefaultResponse
   >;
 }
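
// NOTE: RebootNode, ReimageNode, and DisableNodeScheduling now declare `options`
// as required (the `?` is dropped in this diff). A hedged sketch of a reboot
// call; the body field `nodeRebootOption` is assumed from the Batch REST API
// rather than taken from this diff:
//
//   await client
//     .path("/pools/{poolId}/nodes/{nodeId}/reboot", poolId, nodeId)
//     .post({ body: { nodeRebootOption: "requeue" } });
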
-export interface ComputeNodesGetRemoteLoginSettings {
+export interface GetNodeRemoteLoginSettings {
   /**
    * Before you can remotely login to a Compute Node using the remote login
    * settings, you must create a user Account on the Compute Node. This API can be
@@ -1088,14 +968,14 @@ export interface ComputeNodesGetRemoteLoginSettings {
    * API.
    */
   get(
-    options?: ComputeNodesGetRemoteLoginSettingsParameters
+    options?: GetNodeRemoteLoginSettingsParameters
   ): StreamableMethod<
-    | ComputeNodesGetRemoteLoginSettings200Response
-    | ComputeNodesGetRemoteLoginSettingsDefaultResponse
+    | GetNodeRemoteLoginSettings200Response
+    | GetNodeRemoteLoginSettingsDefaultResponse
   >;
 }

-export interface ComputeNodesGetRemoteDesktop {
+export interface GetNodeRemoteDesktopFile {
   /**
    * Before you can access a Compute Node by using the RDP file, you must create a
    * user Account on the Compute Node. This API can only be invoked on Pools created
@@ -1103,14 +983,14 @@ export interface ComputeNodesGetRemoteDesktop {
    * configuration, see the GetRemoteLoginSettings API.
    */
   get(
-    options?: ComputeNodesGetRemoteDesktopParameters
+    options?: GetNodeRemoteDesktopFileParameters
   ): StreamableMethod<
-    | ComputeNodesGetRemoteDesktop200Response
-    | ComputeNodesGetRemoteDesktopDefaultResponse
+    | GetNodeRemoteDesktopFile200Response
+    | GetNodeRemoteDesktopFileDefaultResponse
   >;
 }

-export interface ComputeNodesUploadBatchServiceLogs {
+export interface UploadNodeLogs {
   /**
    * This is for gathering Azure Batch service log files in an automated fashion
    * from Compute Nodes if you are experiencing an error and wish to escalate to
@@ -1118,283 +998,300 @@ export interface ComputeNodesUploadBatchServiceLogs {
    * support to aid in debugging issues with the Batch service.
    */
   post(
-    options: ComputeNodesUploadBatchServiceLogsParameters
+    options: UploadNodeLogsParameters
   ): StreamableMethod<
-    | ComputeNodesUploadBatchServiceLogs200Response
-    | ComputeNodesUploadBatchServiceLogsDefaultResponse
+    UploadNodeLogs200Response | UploadNodeLogsDefaultResponse
   >;
 }

-export interface ComputeNodesList {
+export interface ListNodes {
   /** Lists the Compute Nodes in the specified Pool. */
   get(
-    options?: ComputeNodesListParameters
-  ): StreamableMethod<
-    ComputeNodesList200Response | ComputeNodesListDefaultResponse
-  >;
+    options?: ListNodesParameters
+  ): StreamableMethod<ListNodes200Response | ListNodesDefaultResponse>;
 }

-export interface ComputeNodeExtensionsGetComputeNodeExtensions {
+export interface GetNodeExtension {
   /** Gets information about the specified Compute Node Extension. */
   get(
-    options?: ComputeNodeExtensionsGetComputeNodeExtensionsParameters
+    options?: GetNodeExtensionParameters
   ): StreamableMethod<
-    | ComputeNodeExtensionsGetComputeNodeExtensions200Response
-    | ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse
+    GetNodeExtension200Response | GetNodeExtensionDefaultResponse
   >;
 }

-export interface ComputeNodeExtensionsListComputeNodeExtensions {
+export interface ListNodeExtensions {
   /** Lists the Compute Nodes Extensions in the specified Pool. */
   get(
-    options?: ComputeNodeExtensionsListComputeNodeExtensionsParameters
+    options?: ListNodeExtensionsParameters
   ): StreamableMethod<
-    | ComputeNodeExtensionsListComputeNodeExtensions200Response
-    | ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse
+    ListNodeExtensions200Response | ListNodeExtensionsDefaultResponse
   >;
 }
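
// NOTE: a sketch pairing a renamed node operation with the isUnexpected helper
// generated alongside these definitions (its overloads appear later in this
// diff); `client`, `poolId`, and `nodeId` are assumed:
//
//   const settings = await client
//     .path("/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", poolId, nodeId)
//     .get();
//   if (isUnexpected(settings)) {
//     // the default response carries the service's error model in settings.body
//     throw new Error(`remote login settings failed with status ${settings.status}`);
//   }
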
+export interface DeleteNodeFile {
+  /** Deletes the specified file from the Compute Node. */
+  delete(
+    options?: DeleteNodeFileParameters
+  ): StreamableMethod<
+    DeleteNodeFile200Response | DeleteNodeFileDefaultResponse
+  >;
+  /** Returns the content of the specified Compute Node file. */
+  get(
+    options?: GetNodeFileParameters
+  ): StreamableMethod<GetNodeFile200Response | GetNodeFileDefaultResponse>;
+  /** Gets the properties of the specified Compute Node file. */
+  head(
+    options?: GetNodeFilePropertiesParameters
+  ): StreamableMethod<
+    GetNodeFileProperties200Response | GetNodeFilePropertiesDefaultResponse
+  >;
+}
+
+export interface ListNodeFiles {
+  /** Lists all of the files in Task directories on the specified Compute Node. */
+  get(
+    options?: ListNodeFilesParameters
+  ): StreamableMethod<ListNodeFiles200Response | ListNodeFilesDefaultResponse>;
+}
+
 export interface Routes {
   /** Resource for '/applications' has methods for the following verbs: get */
-  (path: "/applications"): ApplicationsListApplications;
+  (path: "/applications"): ListApplications;
   /** Resource for '/applications/\{applicationId\}' has methods for the following verbs: get */
   (
     path: "/applications/{applicationId}",
     applicationId: string
-  ): ApplicationsGet;
+  ): GetApplication;
   /** Resource for '/poolusagemetrics' has methods for the following verbs: get */
-  (path: "/poolusagemetrics"): PoolListUsageMetrics;
-  /** Resource for '/lifetimepoolstats' has methods for the following verbs: get */
-  (path: "/lifetimepoolstats"): PoolGetAllPoolLifetimeStatistics;
+  (path: "/poolusagemetrics"): ListPoolUsageMetrics;
   /** Resource for '/pools' has methods for the following verbs: post, get */
-  (path: "/pools"): PoolAddPool;
+  (path: "/pools"): CreatePool;
   /** Resource for '/pools/\{poolId\}' has methods for the following verbs: delete, head, get, patch */
-  (path: "/pools/{poolId}", poolId: string): PoolDeletePool;
+  (path: "/pools/{poolId}", poolId: string): DeletePool;
   /** Resource for '/pools/\{poolId\}/disableautoscale' has methods for the following verbs: post */
   (
     path: "/pools/{poolId}/disableautoscale",
     poolId: string
-  ): PoolDisableAutoScale;
+  ): DisablePoolAutoScale;
   /** Resource for '/pools/\{poolId\}/enableautoscale' has methods for the following verbs: post */
   (
     path: "/pools/{poolId}/enableautoscale",
     poolId: string
-  ): PoolEnableAutoScale;
+  ): EnablePoolAutoScale;
   /** Resource for '/pools/\{poolId\}/evaluateautoscale' has methods for the following verbs: post */
   (
     path: "/pools/{poolId}/evaluateautoscale",
     poolId: string
-  ): PoolEvaluateAutoScale;
+  ): EvaluatePoolAutoScale;
   /** Resource for '/pools/\{poolId\}/resize' has methods for the following verbs: post */
-  (path: "/pools/{poolId}/resize", poolId: string): PoolResize;
+  (path: "/pools/{poolId}/resize", poolId: string): ResizePool;
   /** Resource for '/pools/\{poolId\}/stopresize' has methods for the following verbs: post */
-  (path: "/pools/{poolId}/stopresize", poolId: string): PoolStopResize;
+  (path: "/pools/{poolId}/stopresize", poolId: string): StopPoolResize;
   /** Resource for '/pools/\{poolId\}/updateproperties' has methods for the following verbs: post */
   (
     path: "/pools/{poolId}/updateproperties",
     poolId: string
-  ): PoolUpdateProperties;
+  ): ReplacePoolProperties;
   /** Resource for '/pools/\{poolId\}/removenodes' has methods for the following verbs: post */
-  (path: "/pools/{poolId}/removenodes", poolId: string): PoolRemoveNodes;
+  (path: "/pools/{poolId}/removenodes", poolId: string): RemoveNodes;
   /** Resource for '/supportedimages' has methods for the following verbs: get */
-  (path: "/supportedimages"): AccountListSupportedImages;
+  (path: "/supportedimages"): ListSupportedImages;
   /** Resource for '/nodecounts' has methods for the following verbs: get */
-  (path: "/nodecounts"): AccountListPoolNodeCounts;
-  /** Resource for '/lifetimejobstats' has methods for the following verbs: get */
-  (path: "/lifetimejobstats"): JobGetAllJobLifetimeStatistics;
+  (path: "/nodecounts"): ListPoolNodeCounts;
  /** Resource for '/jobs/\{jobId\}' has methods for
the following verbs: delete, get, patch, put */ - (path: "/jobs/{jobId}", jobId: string): JobDeleteJob; + (path: "/jobs/{jobId}", jobId: string): DeleteJob; /** Resource for '/jobs/\{jobId\}/disable' has methods for the following verbs: post */ - (path: "/jobs/{jobId}/disable", jobId: string): JobDisableJob; + (path: "/jobs/{jobId}/disable", jobId: string): DisableJob; /** Resource for '/jobs/\{jobId\}/enable' has methods for the following verbs: post */ - (path: "/jobs/{jobId}/enable", jobId: string): JobEnableJob; + (path: "/jobs/{jobId}/enable", jobId: string): EnableJob; /** Resource for '/jobs/\{jobId\}/terminate' has methods for the following verbs: post */ - (path: "/jobs/{jobId}/terminate", jobId: string): JobTerminateJob; + (path: "/jobs/{jobId}/terminate", jobId: string): TerminateJob; /** Resource for '/jobs' has methods for the following verbs: post, get */ - (path: "/jobs"): JobAddJob; + (path: "/jobs"): CreateJob; /** Resource for '/jobschedules/\{jobScheduleId\}/jobs' has methods for the following verbs: get */ ( path: "/jobschedules/{jobScheduleId}/jobs", jobScheduleId: string - ): JobListFromJobSchedule; + ): ListJobsFromSchedule; /** Resource for '/jobs/\{jobId\}/jobpreparationandreleasetaskstatus' has methods for the following verbs: get */ ( path: "/jobs/{jobId}/jobpreparationandreleasetaskstatus", jobId: string - ): JobListPreparationAndReleaseTaskStatus; + ): ListJobPreparationAndReleaseTaskStatus; /** Resource for '/jobs/\{jobId\}/taskcounts' has methods for the following verbs: get */ - (path: "/jobs/{jobId}/taskcounts", jobId: string): JobGetTaskCounts; + (path: "/jobs/{jobId}/taskcounts", jobId: string): GetJobTaskCounts; /** Resource for '/certificates' has methods for the following verbs: post, get */ - (path: "/certificates"): CertificatesAddCertificate; + (path: "/certificates"): CreateCertificate; /** Resource for '/certificates(thumbprintAlgorithm=\{thumbprintAlgorithm\},thumbprint=\{thumbprint\})/canceldelete' has methods for the following verbs: post */ ( path: "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete", thumbprintAlgorithm: string, thumbprint: string - ): CertificatesCancelCertificateDeletion; + ): CancelCertificateDeletion; /** Resource for '/certificates(thumbprintAlgorithm=\{thumbprintAlgorithm\},thumbprint=\{thumbprint\})' has methods for the following verbs: delete, get */ ( path: "/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", thumbprintAlgorithm: string, thumbprint: string - ): CertificatesDeleteCertificate; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/files/\{filePath\}' has methods for the following verbs: delete, get, head */ - ( - path: "/jobs/{jobId}/tasks/{taskId}/files/{filePath}", - jobId: string, - taskId: string, - filePath: string - ): FileDeleteFromTask; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/files/\{filePath\}' has methods for the following verbs: delete, get, head */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/files/{filePath}", - poolId: string, - nodeId: string, - filePath: string - ): FileDeleteFromComputeNode; - /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/files' has methods for the following verbs: get */ - ( - path: "/jobs/{jobId}/tasks/{taskId}/files", - jobId: string, - taskId: string - ): FileListFromTask; - /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/files' has methods for the following verbs: get */ - ( - path: "/pools/{poolId}/nodes/{nodeId}/files", - poolId: string, - nodeId: string - ): 
FileListFromComputeNode; + ): DeleteCertificate; /** Resource for '/jobschedules/\{jobScheduleId\}' has methods for the following verbs: head, delete, get, patch, put */ ( path: "/jobschedules/{jobScheduleId}", jobScheduleId: string - ): JobScheduleJobScheduleExists; + ): JobScheduleExists; /** Resource for '/jobschedules/\{jobScheduleId\}/disable' has methods for the following verbs: post */ ( path: "/jobschedules/{jobScheduleId}/disable", jobScheduleId: string - ): JobScheduleDisableJobSchedule; + ): DisableJobSchedule; /** Resource for '/jobschedules/\{jobScheduleId\}/enable' has methods for the following verbs: post */ ( path: "/jobschedules/{jobScheduleId}/enable", jobScheduleId: string - ): JobScheduleEnableJobSchedule; + ): EnableJobSchedule; /** Resource for '/jobschedules/\{jobScheduleId\}/terminate' has methods for the following verbs: post */ ( path: "/jobschedules/{jobScheduleId}/terminate", jobScheduleId: string - ): JobScheduleTerminateJobSchedule; + ): TerminateJobSchedule; /** Resource for '/jobschedules' has methods for the following verbs: post, get */ - (path: "/jobschedules"): JobScheduleAddJobSchedule; + (path: "/jobschedules"): CreateJobSchedule; /** Resource for '/jobs/\{jobId\}/tasks' has methods for the following verbs: post, get */ - (path: "/jobs/{jobId}/tasks", jobId: string): TaskAddTask; + (path: "/jobs/{jobId}/tasks", jobId: string): CreateTask; /** Resource for '/jobs/\{jobId\}/addtaskcollection' has methods for the following verbs: post */ ( path: "/jobs/{jobId}/addtaskcollection", jobId: string - ): TaskAddTaskCollection; + ): CreateTaskCollection; /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}' has methods for the following verbs: delete, get, put */ ( path: "/jobs/{jobId}/tasks/{taskId}", jobId: string, taskId: string - ): TaskDeleteTaskCollection; + ): DeleteTask; /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/subtasksinfo' has methods for the following verbs: get */ ( path: "/jobs/{jobId}/tasks/{taskId}/subtasksinfo", jobId: string, taskId: string - ): TaskListSubtasks; + ): ListSubTasks; /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/terminate' has methods for the following verbs: post */ ( path: "/jobs/{jobId}/tasks/{taskId}/terminate", jobId: string, taskId: string - ): TaskTerminateTaskCollection; + ): TerminateTask; /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/reactivate' has methods for the following verbs: post */ ( path: "/jobs/{jobId}/tasks/{taskId}/reactivate", jobId: string, taskId: string - ): TaskReactivateTaskCollection; + ): ReactivateTask; + /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/files/\{filePath\}' has methods for the following verbs: delete, get, head */ + ( + path: "/jobs/{jobId}/tasks/{taskId}/files/{filePath}", + jobId: string, + taskId: string, + filePath: string + ): DeleteTaskFile; + /** Resource for '/jobs/\{jobId\}/tasks/\{taskId\}/files' has methods for the following verbs: get */ + ( + path: "/jobs/{jobId}/tasks/{taskId}/files", + jobId: string, + taskId: string + ): ListTaskFiles; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/users' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/users", poolId: string, nodeId: string - ): ComputeNodesAddUser; + ): CreateNodeUser; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/users/\{userName\}' has methods for the following verbs: delete, put */ ( path: "/pools/{poolId}/nodes/{nodeId}/users/{userName}", poolId: string, nodeId: string, userName: string - ): ComputeNodesDeleteUser; + ): DeleteNodeUser; /** 
Resource for '/pools/\{poolId\}/nodes/\{nodeId\}' has methods for the following verbs: get */ ( path: "/pools/{poolId}/nodes/{nodeId}", poolId: string, nodeId: string - ): ComputeNodesGetComputeNode; + ): GetNode; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/reboot' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/reboot", poolId: string, nodeId: string - ): ComputeNodesRebootComputeNode; + ): RebootNode; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/reimage' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/reimage", poolId: string, nodeId: string - ): ComputeNodesReimageComputeNode; + ): ReimageNode; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/disablescheduling' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/disablescheduling", poolId: string, nodeId: string - ): ComputeNodesDisableScheduling; + ): DisableNodeScheduling; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/enablescheduling' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/enablescheduling", poolId: string, nodeId: string - ): ComputeNodesEnableScheduling; + ): EnableNodeScheduling; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/remoteloginsettings' has methods for the following verbs: get */ ( path: "/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", poolId: string, nodeId: string - ): ComputeNodesGetRemoteLoginSettings; + ): GetNodeRemoteLoginSettings; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/rdp' has methods for the following verbs: get */ ( path: "/pools/{poolId}/nodes/{nodeId}/rdp", poolId: string, nodeId: string - ): ComputeNodesGetRemoteDesktop; + ): GetNodeRemoteDesktopFile; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/uploadbatchservicelogs' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs", poolId: string, nodeId: string - ): ComputeNodesUploadBatchServiceLogs; + ): UploadNodeLogs; /** Resource for '/pools/\{poolId\}/nodes' has methods for the following verbs: get */ - (path: "/pools/{poolId}/nodes", poolId: string): ComputeNodesList; + (path: "/pools/{poolId}/nodes", poolId: string): ListNodes; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/extensions/\{extensionName\}' has methods for the following verbs: get */ ( path: "/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}", poolId: string, nodeId: string, extensionName: string - ): ComputeNodeExtensionsGetComputeNodeExtensions; + ): GetNodeExtension; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/extensions' has methods for the following verbs: get */ ( path: "/pools/{poolId}/nodes/{nodeId}/extensions", poolId: string, nodeId: string - ): ComputeNodeExtensionsListComputeNodeExtensions; + ): ListNodeExtensions; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/files/\{filePath\}' has methods for the following verbs: delete, get, head */ + ( + path: "/pools/{poolId}/nodes/{nodeId}/files/{filePath}", + poolId: string, + nodeId: string, + filePath: string + ): DeleteNodeFile; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/files' has methods for the following verbs: get */ + ( + path: "/pools/{poolId}/nodes/{nodeId}/files", + poolId: string, + nodeId: string + ): ListNodeFiles; } -export type BatchServiceContext = Client & { +export type BatchContext = Client & { path: Routes; }; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/index.ts 
b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/index.ts index 848da4b690..93712575e2 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/index.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/index.ts @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -import BatchServiceClient from "./batchServiceClient.js"; +import BatchClient from "./batchClient.js"; -export * from "./batchServiceClient.js"; +export * from "./batchClient.js"; export * from "./parameters.js"; export * from "./responses.js"; export * from "./clientDefinitions.js"; @@ -12,4 +12,4 @@ export * from "./models.js"; export * from "./outputModels.js"; export * from "./paginateHelper.js"; -export default BatchServiceClient; +export default BatchClient; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/isUnexpected.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/isUnexpected.ts index 278090397c..3692474257 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/isUnexpected.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/isUnexpected.ts @@ -2,171 +2,166 @@ // Licensed under the MIT license. import { - ApplicationsListApplications200Response, - ApplicationsListApplicationsDefaultResponse, - ApplicationsGet200Response, - ApplicationsGetDefaultResponse, - PoolListUsageMetrics200Response, - PoolListUsageMetricsDefaultResponse, - PoolGetAllPoolLifetimeStatistics200Response, - PoolGetAllPoolLifetimeStatisticsDefaultResponse, - PoolAddPool201Response, - PoolAddPoolDefaultResponse, - PoolListPools200Response, - PoolListPoolsDefaultResponse, - PoolDeletePool202Response, - PoolDeletePoolDefaultResponse, + ListApplications200Response, + ListApplicationsDefaultResponse, + GetApplication200Response, + GetApplicationDefaultResponse, + ListPoolUsageMetrics200Response, + ListPoolUsageMetricsDefaultResponse, + CreatePool201Response, + CreatePoolDefaultResponse, + ListPools200Response, + ListPoolsDefaultResponse, + DeletePool202Response, + DeletePoolDefaultResponse, PoolExists200Response, PoolExists404Response, PoolExistsDefaultResponse, - PoolGetPool200Response, - PoolGetPoolDefaultResponse, - PoolPatchPool200Response, - PoolPatchPoolDefaultResponse, - PoolDisableAutoScale200Response, - PoolDisableAutoScaleDefaultResponse, - PoolEnableAutoScale200Response, - PoolEnableAutoScaleDefaultResponse, - PoolEvaluateAutoScale200Response, - PoolEvaluateAutoScaleDefaultResponse, - PoolResize202Response, - PoolResizeDefaultResponse, - PoolStopResize202Response, - PoolStopResizeDefaultResponse, - PoolUpdateProperties204Response, - PoolUpdatePropertiesDefaultResponse, - PoolRemoveNodes202Response, - PoolRemoveNodesDefaultResponse, - AccountListSupportedImages200Response, - AccountListSupportedImagesDefaultResponse, - AccountListPoolNodeCounts200Response, - AccountListPoolNodeCountsDefaultResponse, - JobGetAllJobLifetimeStatistics200Response, - JobGetAllJobLifetimeStatisticsDefaultResponse, - JobDeleteJob202Response, - JobDeleteJobDefaultResponse, - JobGetJob200Response, - JobGetJobDefaultResponse, - JobPatchJob200Response, - JobPatchJobDefaultResponse, - JobUpdateJob200Response, - JobUpdateJobDefaultResponse, - JobDisableJob202Response, - JobDisableJobDefaultResponse, - JobEnableJob202Response, - JobEnableJobDefaultResponse, - JobTerminateJob202Response, - JobTerminateJobDefaultResponse, - 
JobAddJob201Response, - JobAddJobDefaultResponse, - JobListJobs200Response, - JobListJobsDefaultResponse, - JobListFromJobSchedule200Response, - JobListFromJobScheduleDefaultResponse, - JobListPreparationAndReleaseTaskStatus200Response, - JobListPreparationAndReleaseTaskStatusDefaultResponse, - JobGetTaskCounts200Response, - JobGetTaskCountsDefaultResponse, - CertificatesAddCertificate201Response, - CertificatesAddCertificateDefaultResponse, - CertificatesListCertificates200Response, - CertificatesListCertificatesDefaultResponse, - CertificatesCancelCertificateDeletion204Response, - CertificatesCancelCertificateDeletionDefaultResponse, - CertificatesDeleteCertificate202Response, - CertificatesDeleteCertificateDefaultResponse, - CertificatesGetCertificate200Response, - CertificatesGetCertificateDefaultResponse, - FileDeleteFromTask200Response, - FileDeleteFromTaskDefaultResponse, - FileGetFromTask200Response, - FileGetFromTaskDefaultResponse, - FileGetPropertiesFromTask200Response, - FileGetPropertiesFromTaskDefaultResponse, - FileDeleteFromComputeNode200Response, - FileDeleteFromComputeNodeDefaultResponse, - FileGetFromComputeNode200Response, - FileGetFromComputeNodeDefaultResponse, - FileGetPropertiesFromComputeNode200Response, - FileGetPropertiesFromComputeNodeDefaultResponse, - FileListFromTask200Response, - FileListFromTaskDefaultResponse, - FileListFromComputeNode200Response, - FileListFromComputeNodeDefaultResponse, - JobScheduleJobScheduleExists200Response, - JobScheduleJobScheduleExists404Response, - JobScheduleJobScheduleExistsDefaultResponse, - JobScheduleDeleteJobSchedule202Response, - JobScheduleDeleteJobScheduleDefaultResponse, - JobScheduleGetJobSchedule200Response, - JobScheduleGetJobScheduleDefaultResponse, - JobSchedulePatchJobSchedule200Response, - JobSchedulePatchJobScheduleDefaultResponse, - JobScheduleUpdateJobSchedule200Response, - JobScheduleUpdateJobScheduleDefaultResponse, - JobScheduleDisableJobSchedule204Response, - JobScheduleDisableJobScheduleDefaultResponse, - JobScheduleEnableJobSchedule204Response, - JobScheduleEnableJobScheduleDefaultResponse, - JobScheduleTerminateJobSchedule202Response, - JobScheduleTerminateJobScheduleDefaultResponse, - JobScheduleAddJobSchedule201Response, - JobScheduleAddJobScheduleDefaultResponse, - JobScheduleListJobSchedules200Response, - JobScheduleListJobSchedulesDefaultResponse, - TaskAddTask201Response, - TaskAddTaskDefaultResponse, - TaskListTasks200Response, - TaskListTasksDefaultResponse, - TaskAddTaskCollection200Response, - TaskAddTaskCollectionDefaultResponse, - TaskDeleteTaskCollection200Response, - TaskDeleteTaskCollectionDefaultResponse, - TaskGetTaskCollection200Response, - TaskGetTaskCollectionDefaultResponse, - TaskUpdateTaskCollection200Response, - TaskUpdateTaskCollectionDefaultResponse, - TaskListSubtasks200Response, - TaskListSubtasksDefaultResponse, - TaskTerminateTaskCollection204Response, - TaskTerminateTaskCollectionDefaultResponse, - TaskReactivateTaskCollection204Response, - TaskReactivateTaskCollectionDefaultResponse, - ComputeNodesAddUser201Response, - ComputeNodesAddUserDefaultResponse, - ComputeNodesDeleteUser200Response, - ComputeNodesDeleteUserDefaultResponse, - ComputeNodesUpdateUser200Response, - ComputeNodesUpdateUserDefaultResponse, - ComputeNodesGetComputeNode200Response, - ComputeNodesGetComputeNodeDefaultResponse, - ComputeNodesRebootComputeNode202Response, - ComputeNodesRebootComputeNodeDefaultResponse, - ComputeNodesReimageComputeNode202Response, - 
ComputeNodesReimageComputeNodeDefaultResponse, - ComputeNodesDisableScheduling200Response, - ComputeNodesDisableSchedulingDefaultResponse, - ComputeNodesEnableScheduling200Response, - ComputeNodesEnableSchedulingDefaultResponse, - ComputeNodesGetRemoteLoginSettings200Response, - ComputeNodesGetRemoteLoginSettingsDefaultResponse, - ComputeNodesGetRemoteDesktop200Response, - ComputeNodesGetRemoteDesktopDefaultResponse, - ComputeNodesUploadBatchServiceLogs200Response, - ComputeNodesUploadBatchServiceLogsDefaultResponse, - ComputeNodesList200Response, - ComputeNodesListDefaultResponse, - ComputeNodeExtensionsGetComputeNodeExtensions200Response, - ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse, - ComputeNodeExtensionsListComputeNodeExtensions200Response, - ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse, + GetPool200Response, + GetPoolDefaultResponse, + UpdatePool200Response, + UpdatePoolDefaultResponse, + DisablePoolAutoScale200Response, + DisablePoolAutoScaleDefaultResponse, + EnablePoolAutoScale200Response, + EnablePoolAutoScaleDefaultResponse, + EvaluatePoolAutoScale200Response, + EvaluatePoolAutoScaleDefaultResponse, + ResizePool202Response, + ResizePoolDefaultResponse, + StopPoolResize202Response, + StopPoolResizeDefaultResponse, + ReplacePoolProperties204Response, + ReplacePoolPropertiesDefaultResponse, + RemoveNodes202Response, + RemoveNodesDefaultResponse, + ListSupportedImages200Response, + ListSupportedImagesDefaultResponse, + ListPoolNodeCounts200Response, + ListPoolNodeCountsDefaultResponse, + DeleteJob202Response, + DeleteJobDefaultResponse, + GetJob200Response, + GetJobDefaultResponse, + UpdateJob200Response, + UpdateJobDefaultResponse, + ReplaceJob200Response, + ReplaceJobDefaultResponse, + DisableJob202Response, + DisableJobDefaultResponse, + EnableJob202Response, + EnableJobDefaultResponse, + TerminateJob202Response, + TerminateJobDefaultResponse, + CreateJob201Response, + CreateJobDefaultResponse, + ListJobs200Response, + ListJobsDefaultResponse, + ListJobsFromSchedule200Response, + ListJobsFromScheduleDefaultResponse, + ListJobPreparationAndReleaseTaskStatus200Response, + ListJobPreparationAndReleaseTaskStatusDefaultResponse, + GetJobTaskCounts200Response, + GetJobTaskCountsDefaultResponse, + CreateCertificate201Response, + CreateCertificateDefaultResponse, + ListCertificates200Response, + ListCertificatesDefaultResponse, + CancelCertificateDeletion204Response, + CancelCertificateDeletionDefaultResponse, + DeleteCertificate202Response, + DeleteCertificateDefaultResponse, + GetCertificate200Response, + GetCertificateDefaultResponse, + JobScheduleExists200Response, + JobScheduleExists404Response, + JobScheduleExistsDefaultResponse, + DeleteJobSchedule202Response, + DeleteJobScheduleDefaultResponse, + GetJobSchedule200Response, + GetJobScheduleDefaultResponse, + UpdateJobSchedule200Response, + UpdateJobScheduleDefaultResponse, + ReplaceJobSchedule200Response, + ReplaceJobScheduleDefaultResponse, + DisableJobSchedule204Response, + DisableJobScheduleDefaultResponse, + EnableJobSchedule204Response, + EnableJobScheduleDefaultResponse, + TerminateJobSchedule202Response, + TerminateJobScheduleDefaultResponse, + CreateJobSchedule201Response, + CreateJobScheduleDefaultResponse, + ListJobSchedules200Response, + ListJobSchedulesDefaultResponse, + CreateTask201Response, + CreateTaskDefaultResponse, + ListTasks200Response, + ListTasksDefaultResponse, + CreateTaskCollection200Response, + CreateTaskCollectionDefaultResponse, + DeleteTask200Response, + 
DeleteTaskDefaultResponse,
+  GetTask200Response,
+  GetTaskDefaultResponse,
+  ReplaceTask200Response,
+  ReplaceTaskDefaultResponse,
+  ListSubTasks200Response,
+  ListSubTasksDefaultResponse,
+  TerminateTask204Response,
+  TerminateTaskDefaultResponse,
+  ReactivateTask204Response,
+  ReactivateTaskDefaultResponse,
+  DeleteTaskFile200Response,
+  DeleteTaskFileDefaultResponse,
+  GetTaskFile200Response,
+  GetTaskFileDefaultResponse,
+  GetTaskFileProperties200Response,
+  GetTaskFilePropertiesDefaultResponse,
+  ListTaskFiles200Response,
+  ListTaskFilesDefaultResponse,
+  CreateNodeUser201Response,
+  CreateNodeUserDefaultResponse,
+  DeleteNodeUser200Response,
+  DeleteNodeUserDefaultResponse,
+  ReplaceNodeUser200Response,
+  ReplaceNodeUserDefaultResponse,
+  GetNode200Response,
+  GetNodeDefaultResponse,
+  RebootNode202Response,
+  RebootNodeDefaultResponse,
+  ReimageNode202Response,
+  ReimageNodeDefaultResponse,
+  DisableNodeScheduling200Response,
+  DisableNodeSchedulingDefaultResponse,
+  EnableNodeScheduling200Response,
+  EnableNodeSchedulingDefaultResponse,
+  GetNodeRemoteLoginSettings200Response,
+  GetNodeRemoteLoginSettingsDefaultResponse,
+  GetNodeRemoteDesktopFile200Response,
+  GetNodeRemoteDesktopFileDefaultResponse,
+  UploadNodeLogs200Response,
+  UploadNodeLogsDefaultResponse,
+  ListNodes200Response,
+  ListNodesDefaultResponse,
+  GetNodeExtension200Response,
+  GetNodeExtensionDefaultResponse,
+  ListNodeExtensions200Response,
+  ListNodeExtensionsDefaultResponse,
+  DeleteNodeFile200Response,
+  DeleteNodeFileDefaultResponse,
+  GetNodeFile200Response,
+  GetNodeFileDefaultResponse,
+  GetNodeFileProperties200Response,
+  GetNodeFilePropertiesDefaultResponse,
+  ListNodeFiles200Response,
+  ListNodeFilesDefaultResponse,
 } from "./responses.js";

 const responseMap: Record<string, string[]> = {
   "GET /applications": ["200"],
   "GET /applications/{applicationId}": ["200"],
   "GET /poolusagemetrics": ["200"],
-  "GET /lifetimepoolstats": ["200"],
   "POST /pools": ["201"],
   "GET /pools": ["200"],
   "DELETE /pools/{poolId}": ["202"],
@@ -182,7 +177,6 @@ const responseMap: Record<string, string[]> = {
   "POST /pools/{poolId}/removenodes": ["202"],
   "GET /supportedimages": ["200"],
   "GET /nodecounts": ["200"],
-  "GET /lifetimejobstats": ["200"],
   "DELETE /jobs/{jobId}": ["202"],
   "GET /jobs/{jobId}": ["200"],
   "PATCH /jobs/{jobId}": ["200"],
@@ -203,14 +197,6 @@ const responseMap: Record<string, string[]> = {
     ["202"],
   "GET /certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})":
     ["200"],
-  "DELETE /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"],
-  "GET /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"],
-  "HEAD /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"],
-  "DELETE /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"],
-  "GET /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"],
-  "HEAD /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"],
-  "GET /jobs/{jobId}/tasks/{taskId}/files": ["200"],
-  "GET /pools/{poolId}/nodes/{nodeId}/files": ["200"],
   "HEAD /jobschedules/{jobScheduleId}": ["200", "404"],
   "DELETE /jobschedules/{jobScheduleId}": ["202"],
   "GET /jobschedules/{jobScheduleId}": ["200"],
@@ -230,6 +216,10 @@ const responseMap: Record<string, string[]> = {
   "GET /jobs/{jobId}/tasks/{taskId}/subtasksinfo": ["200"],
   "POST /jobs/{jobId}/tasks/{taskId}/terminate": ["204"],
   "POST /jobs/{jobId}/tasks/{taskId}/reactivate": ["204"],
+  "DELETE /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"],
+  "GET /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"],
+  "HEAD /jobs/{jobId}/tasks/{taskId}/files/{filePath}": ["200"],
+  "GET
/jobs/{jobId}/tasks/{taskId}/files": ["200"], "POST /pools/{poolId}/nodes/{nodeId}/users": ["201"], "DELETE /pools/{poolId}/nodes/{nodeId}/users/{userName}": ["200"], "PUT /pools/{poolId}/nodes/{nodeId}/users/{userName}": ["200"], @@ -244,35 +234,32 @@ const responseMap: Record = { "GET /pools/{poolId}/nodes": ["200"], "GET /pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}": ["200"], "GET /pools/{poolId}/nodes/{nodeId}/extensions": ["200"], + "DELETE /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"], + "GET /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"], + "HEAD /pools/{poolId}/nodes/{nodeId}/files/{filePath}": ["200"], + "GET /pools/{poolId}/nodes/{nodeId}/files": ["200"], }; export function isUnexpected( - response: - | ApplicationsListApplications200Response - | ApplicationsListApplicationsDefaultResponse -): response is ApplicationsListApplicationsDefaultResponse; + response: ListApplications200Response | ListApplicationsDefaultResponse +): response is ListApplicationsDefaultResponse; export function isUnexpected( - response: ApplicationsGet200Response | ApplicationsGetDefaultResponse -): response is ApplicationsGetDefaultResponse; + response: GetApplication200Response | GetApplicationDefaultResponse +): response is GetApplicationDefaultResponse; export function isUnexpected( response: - | PoolListUsageMetrics200Response - | PoolListUsageMetricsDefaultResponse -): response is PoolListUsageMetricsDefaultResponse; + | ListPoolUsageMetrics200Response + | ListPoolUsageMetricsDefaultResponse +): response is ListPoolUsageMetricsDefaultResponse; export function isUnexpected( - response: - | PoolGetAllPoolLifetimeStatistics200Response - | PoolGetAllPoolLifetimeStatisticsDefaultResponse -): response is PoolGetAllPoolLifetimeStatisticsDefaultResponse; + response: CreatePool201Response | CreatePoolDefaultResponse +): response is CreatePoolDefaultResponse; export function isUnexpected( - response: PoolAddPool201Response | PoolAddPoolDefaultResponse -): response is PoolAddPoolDefaultResponse; + response: ListPools200Response | ListPoolsDefaultResponse +): response is ListPoolsDefaultResponse; export function isUnexpected( - response: PoolListPools200Response | PoolListPoolsDefaultResponse -): response is PoolListPoolsDefaultResponse; -export function isUnexpected( - response: PoolDeletePool202Response | PoolDeletePoolDefaultResponse -): response is PoolDeletePoolDefaultResponse; + response: DeletePool202Response | DeletePoolDefaultResponse +): response is DeletePoolDefaultResponse; export function isUnexpected( response: | PoolExists200Response @@ -280,547 +267,476 @@ export function isUnexpected( | PoolExistsDefaultResponse ): response is PoolExistsDefaultResponse; export function isUnexpected( - response: PoolGetPool200Response | PoolGetPoolDefaultResponse -): response is PoolGetPoolDefaultResponse; + response: GetPool200Response | GetPoolDefaultResponse +): response is GetPoolDefaultResponse; export function isUnexpected( - response: PoolPatchPool200Response | PoolPatchPoolDefaultResponse -): response is PoolPatchPoolDefaultResponse; + response: UpdatePool200Response | UpdatePoolDefaultResponse +): response is UpdatePoolDefaultResponse; export function isUnexpected( response: - | PoolDisableAutoScale200Response - | PoolDisableAutoScaleDefaultResponse -): response is PoolDisableAutoScaleDefaultResponse; + | DisablePoolAutoScale200Response + | DisablePoolAutoScaleDefaultResponse +): response is DisablePoolAutoScaleDefaultResponse; export function isUnexpected( - 
response: PoolEnableAutoScale200Response | PoolEnableAutoScaleDefaultResponse -): response is PoolEnableAutoScaleDefaultResponse; + response: EnablePoolAutoScale200Response | EnablePoolAutoScaleDefaultResponse +): response is EnablePoolAutoScaleDefaultResponse; export function isUnexpected( response: - | PoolEvaluateAutoScale200Response - | PoolEvaluateAutoScaleDefaultResponse -): response is PoolEvaluateAutoScaleDefaultResponse; + | EvaluatePoolAutoScale200Response + | EvaluatePoolAutoScaleDefaultResponse +): response is EvaluatePoolAutoScaleDefaultResponse; export function isUnexpected( - response: PoolResize202Response | PoolResizeDefaultResponse -): response is PoolResizeDefaultResponse; + response: ResizePool202Response | ResizePoolDefaultResponse +): response is ResizePoolDefaultResponse; export function isUnexpected( - response: PoolStopResize202Response | PoolStopResizeDefaultResponse -): response is PoolStopResizeDefaultResponse; + response: StopPoolResize202Response | StopPoolResizeDefaultResponse +): response is StopPoolResizeDefaultResponse; export function isUnexpected( response: - | PoolUpdateProperties204Response - | PoolUpdatePropertiesDefaultResponse -): response is PoolUpdatePropertiesDefaultResponse; + | ReplacePoolProperties204Response + | ReplacePoolPropertiesDefaultResponse +): response is ReplacePoolPropertiesDefaultResponse; export function isUnexpected( - response: PoolRemoveNodes202Response | PoolRemoveNodesDefaultResponse -): response is PoolRemoveNodesDefaultResponse; + response: RemoveNodes202Response | RemoveNodesDefaultResponse +): response is RemoveNodesDefaultResponse; export function isUnexpected( - response: - | AccountListSupportedImages200Response - | AccountListSupportedImagesDefaultResponse -): response is AccountListSupportedImagesDefaultResponse; + response: ListSupportedImages200Response | ListSupportedImagesDefaultResponse +): response is ListSupportedImagesDefaultResponse; export function isUnexpected( - response: - | AccountListPoolNodeCounts200Response - | AccountListPoolNodeCountsDefaultResponse -): response is AccountListPoolNodeCountsDefaultResponse; + response: ListPoolNodeCounts200Response | ListPoolNodeCountsDefaultResponse +): response is ListPoolNodeCountsDefaultResponse; export function isUnexpected( - response: - | JobGetAllJobLifetimeStatistics200Response - | JobGetAllJobLifetimeStatisticsDefaultResponse -): response is JobGetAllJobLifetimeStatisticsDefaultResponse; -export function isUnexpected( - response: JobDeleteJob202Response | JobDeleteJobDefaultResponse -): response is JobDeleteJobDefaultResponse; + response: DeleteJob202Response | DeleteJobDefaultResponse +): response is DeleteJobDefaultResponse; export function isUnexpected( - response: JobGetJob200Response | JobGetJobDefaultResponse -): response is JobGetJobDefaultResponse; + response: GetJob200Response | GetJobDefaultResponse +): response is GetJobDefaultResponse; export function isUnexpected( - response: JobPatchJob200Response | JobPatchJobDefaultResponse -): response is JobPatchJobDefaultResponse; + response: UpdateJob200Response | UpdateJobDefaultResponse +): response is UpdateJobDefaultResponse; export function isUnexpected( - response: JobUpdateJob200Response | JobUpdateJobDefaultResponse -): response is JobUpdateJobDefaultResponse; + response: ReplaceJob200Response | ReplaceJobDefaultResponse +): response is ReplaceJobDefaultResponse; export function isUnexpected( - response: JobDisableJob202Response | JobDisableJobDefaultResponse -): response is 
JobDisableJobDefaultResponse; + response: DisableJob202Response | DisableJobDefaultResponse +): response is DisableJobDefaultResponse; export function isUnexpected( - response: JobEnableJob202Response | JobEnableJobDefaultResponse -): response is JobEnableJobDefaultResponse; + response: EnableJob202Response | EnableJobDefaultResponse +): response is EnableJobDefaultResponse; export function isUnexpected( - response: JobTerminateJob202Response | JobTerminateJobDefaultResponse -): response is JobTerminateJobDefaultResponse; + response: TerminateJob202Response | TerminateJobDefaultResponse +): response is TerminateJobDefaultResponse; export function isUnexpected( - response: JobAddJob201Response | JobAddJobDefaultResponse -): response is JobAddJobDefaultResponse; + response: CreateJob201Response | CreateJobDefaultResponse +): response is CreateJobDefaultResponse; export function isUnexpected( - response: JobListJobs200Response | JobListJobsDefaultResponse -): response is JobListJobsDefaultResponse; + response: ListJobs200Response | ListJobsDefaultResponse +): response is ListJobsDefaultResponse; export function isUnexpected( response: - | JobListFromJobSchedule200Response - | JobListFromJobScheduleDefaultResponse -): response is JobListFromJobScheduleDefaultResponse; + | ListJobsFromSchedule200Response + | ListJobsFromScheduleDefaultResponse +): response is ListJobsFromScheduleDefaultResponse; export function isUnexpected( response: - | JobListPreparationAndReleaseTaskStatus200Response - | JobListPreparationAndReleaseTaskStatusDefaultResponse -): response is JobListPreparationAndReleaseTaskStatusDefaultResponse; + | ListJobPreparationAndReleaseTaskStatus200Response + | ListJobPreparationAndReleaseTaskStatusDefaultResponse +): response is ListJobPreparationAndReleaseTaskStatusDefaultResponse; export function isUnexpected( - response: JobGetTaskCounts200Response | JobGetTaskCountsDefaultResponse -): response is JobGetTaskCountsDefaultResponse; + response: GetJobTaskCounts200Response | GetJobTaskCountsDefaultResponse +): response is GetJobTaskCountsDefaultResponse; export function isUnexpected( - response: - | CertificatesAddCertificate201Response - | CertificatesAddCertificateDefaultResponse -): response is CertificatesAddCertificateDefaultResponse; + response: CreateCertificate201Response | CreateCertificateDefaultResponse +): response is CreateCertificateDefaultResponse; export function isUnexpected( - response: - | CertificatesListCertificates200Response - | CertificatesListCertificatesDefaultResponse -): response is CertificatesListCertificatesDefaultResponse; + response: ListCertificates200Response | ListCertificatesDefaultResponse +): response is ListCertificatesDefaultResponse; export function isUnexpected( response: - | CertificatesCancelCertificateDeletion204Response - | CertificatesCancelCertificateDeletionDefaultResponse -): response is CertificatesCancelCertificateDeletionDefaultResponse; + | CancelCertificateDeletion204Response + | CancelCertificateDeletionDefaultResponse +): response is CancelCertificateDeletionDefaultResponse; export function isUnexpected( - response: - | CertificatesDeleteCertificate202Response - | CertificatesDeleteCertificateDefaultResponse -): response is CertificatesDeleteCertificateDefaultResponse; + response: DeleteCertificate202Response | DeleteCertificateDefaultResponse +): response is DeleteCertificateDefaultResponse; export function isUnexpected( - response: - | CertificatesGetCertificate200Response - | CertificatesGetCertificateDefaultResponse -): 
response is CertificatesGetCertificateDefaultResponse;
+  response: GetCertificate200Response | GetCertificateDefaultResponse
+): response is GetCertificateDefaultResponse;
 export function isUnexpected(
-  response: FileDeleteFromTask200Response | FileDeleteFromTaskDefaultResponse
-): response is FileDeleteFromTaskDefaultResponse;
+  response:
+    | JobScheduleExists200Response
+    | JobScheduleExists404Response
+    | JobScheduleExistsDefaultResponse
+): response is JobScheduleExistsDefaultResponse;
 export function isUnexpected(
-  response: FileGetFromTask200Response | FileGetFromTaskDefaultResponse
-): response is FileGetFromTaskDefaultResponse;
+  response: DeleteJobSchedule202Response | DeleteJobScheduleDefaultResponse
+): response is DeleteJobScheduleDefaultResponse;
 export function isUnexpected(
-  response:
-    | FileGetPropertiesFromTask200Response
-    | FileGetPropertiesFromTaskDefaultResponse
-): response is FileGetPropertiesFromTaskDefaultResponse;
+  response: GetJobSchedule200Response | GetJobScheduleDefaultResponse
+): response is GetJobScheduleDefaultResponse;
 export function isUnexpected(
-  response:
-    | FileDeleteFromComputeNode200Response
-    | FileDeleteFromComputeNodeDefaultResponse
-): response is FileDeleteFromComputeNodeDefaultResponse;
+  response: UpdateJobSchedule200Response | UpdateJobScheduleDefaultResponse
+): response is UpdateJobScheduleDefaultResponse;
 export function isUnexpected(
-  response:
-    | FileGetFromComputeNode200Response
-    | FileGetFromComputeNodeDefaultResponse
-): response is FileGetFromComputeNodeDefaultResponse;
+  response: ReplaceJobSchedule200Response | ReplaceJobScheduleDefaultResponse
+): response is ReplaceJobScheduleDefaultResponse;
 export function isUnexpected(
-  response:
-    | FileGetPropertiesFromComputeNode200Response
-    | FileGetPropertiesFromComputeNodeDefaultResponse
-): response is FileGetPropertiesFromComputeNodeDefaultResponse;
+  response: DisableJobSchedule204Response | DisableJobScheduleDefaultResponse
+): response is DisableJobScheduleDefaultResponse;
 export function isUnexpected(
-  response: FileListFromTask200Response | FileListFromTaskDefaultResponse
-): response is FileListFromTaskDefaultResponse;
+  response: EnableJobSchedule204Response | EnableJobScheduleDefaultResponse
+): response is EnableJobScheduleDefaultResponse;
 export function isUnexpected(
   response:
-    | FileListFromComputeNode200Response
-    | FileListFromComputeNodeDefaultResponse
-): response is FileListFromComputeNodeDefaultResponse;
+    | TerminateJobSchedule202Response
+    | TerminateJobScheduleDefaultResponse
+): response is TerminateJobScheduleDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleJobScheduleExists200Response
-    | JobScheduleJobScheduleExists404Response
-    | JobScheduleJobScheduleExistsDefaultResponse
-): response is JobScheduleJobScheduleExistsDefaultResponse;
+  response: CreateJobSchedule201Response | CreateJobScheduleDefaultResponse
+): response is CreateJobScheduleDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleDeleteJobSchedule202Response
-    | JobScheduleDeleteJobScheduleDefaultResponse
-): response is JobScheduleDeleteJobScheduleDefaultResponse;
+  response: ListJobSchedules200Response | ListJobSchedulesDefaultResponse
+): response is ListJobSchedulesDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleGetJobSchedule200Response
-    | JobScheduleGetJobScheduleDefaultResponse
-): response is JobScheduleGetJobScheduleDefaultResponse;
+  response: CreateTask201Response | CreateTaskDefaultResponse
+): response is CreateTaskDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobSchedulePatchJobSchedule200Response
-    | JobSchedulePatchJobScheduleDefaultResponse
-): response is JobSchedulePatchJobScheduleDefaultResponse;
+  response: ListTasks200Response | ListTasksDefaultResponse
+): response is ListTasksDefaultResponse;
 export function isUnexpected(
   response:
-    | JobScheduleUpdateJobSchedule200Response
-    | JobScheduleUpdateJobScheduleDefaultResponse
-): response is JobScheduleUpdateJobScheduleDefaultResponse;
+    | CreateTaskCollection200Response
+    | CreateTaskCollectionDefaultResponse
+): response is CreateTaskCollectionDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleDisableJobSchedule204Response
-    | JobScheduleDisableJobScheduleDefaultResponse
-): response is JobScheduleDisableJobScheduleDefaultResponse;
+  response: DeleteTask200Response | DeleteTaskDefaultResponse
+): response is DeleteTaskDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleEnableJobSchedule204Response
-    | JobScheduleEnableJobScheduleDefaultResponse
-): response is JobScheduleEnableJobScheduleDefaultResponse;
+  response: GetTask200Response | GetTaskDefaultResponse
+): response is GetTaskDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleTerminateJobSchedule202Response
-    | JobScheduleTerminateJobScheduleDefaultResponse
-): response is JobScheduleTerminateJobScheduleDefaultResponse;
+  response: ReplaceTask200Response | ReplaceTaskDefaultResponse
+): response is ReplaceTaskDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleAddJobSchedule201Response
-    | JobScheduleAddJobScheduleDefaultResponse
-): response is JobScheduleAddJobScheduleDefaultResponse;
+  response: ListSubTasks200Response | ListSubTasksDefaultResponse
+): response is ListSubTasksDefaultResponse;
 export function isUnexpected(
-  response:
-    | JobScheduleListJobSchedules200Response
-    | JobScheduleListJobSchedulesDefaultResponse
-): response is JobScheduleListJobSchedulesDefaultResponse;
+  response: TerminateTask204Response | TerminateTaskDefaultResponse
+): response is TerminateTaskDefaultResponse;
 export function isUnexpected(
-  response: TaskAddTask201Response | TaskAddTaskDefaultResponse
-): response is TaskAddTaskDefaultResponse;
+  response: ReactivateTask204Response | ReactivateTaskDefaultResponse
+): response is ReactivateTaskDefaultResponse;
 export function isUnexpected(
-  response: TaskListTasks200Response | TaskListTasksDefaultResponse
-): response is TaskListTasksDefaultResponse;
+  response: DeleteTaskFile200Response | DeleteTaskFileDefaultResponse
+): response is DeleteTaskFileDefaultResponse;
 export function isUnexpected(
-  response:
-    | TaskAddTaskCollection200Response
-    | TaskAddTaskCollectionDefaultResponse
-): response is TaskAddTaskCollectionDefaultResponse;
+  response: GetTaskFile200Response | GetTaskFileDefaultResponse
+): response is GetTaskFileDefaultResponse;
 export function isUnexpected(
   response:
-    | TaskDeleteTaskCollection200Response
-    | TaskDeleteTaskCollectionDefaultResponse
-): response is TaskDeleteTaskCollectionDefaultResponse;
+    | GetTaskFileProperties200Response
+    | GetTaskFilePropertiesDefaultResponse
+): response is GetTaskFilePropertiesDefaultResponse;
 export function isUnexpected(
-  response:
-    | TaskGetTaskCollection200Response
-    | TaskGetTaskCollectionDefaultResponse
-): response is TaskGetTaskCollectionDefaultResponse;
+  response: ListTaskFiles200Response | ListTaskFilesDefaultResponse
+): response is ListTaskFilesDefaultResponse;
 export function isUnexpected(
-  response:
-    | TaskUpdateTaskCollection200Response
-    | TaskUpdateTaskCollectionDefaultResponse
-): response is TaskUpdateTaskCollectionDefaultResponse;
+  response: CreateNodeUser201Response | CreateNodeUserDefaultResponse
+): response is CreateNodeUserDefaultResponse;
 export function isUnexpected(
-  response: TaskListSubtasks200Response | TaskListSubtasksDefaultResponse
-): response is TaskListSubtasksDefaultResponse;
+  response: DeleteNodeUser200Response | DeleteNodeUserDefaultResponse
+): response is DeleteNodeUserDefaultResponse;
 export function isUnexpected(
-  response:
-    | TaskTerminateTaskCollection204Response
-    | TaskTerminateTaskCollectionDefaultResponse
-): response is TaskTerminateTaskCollectionDefaultResponse;
+  response: ReplaceNodeUser200Response | ReplaceNodeUserDefaultResponse
+): response is ReplaceNodeUserDefaultResponse;
 export function isUnexpected(
-  response:
-    | TaskReactivateTaskCollection204Response
-    | TaskReactivateTaskCollectionDefaultResponse
-): response is TaskReactivateTaskCollectionDefaultResponse;
+  response: GetNode200Response | GetNodeDefaultResponse
+): response is GetNodeDefaultResponse;
 export function isUnexpected(
-  response: ComputeNodesAddUser201Response | ComputeNodesAddUserDefaultResponse
-): response is ComputeNodesAddUserDefaultResponse;
+  response: RebootNode202Response | RebootNodeDefaultResponse
+): response is RebootNodeDefaultResponse;
 export function isUnexpected(
-  response:
-    | ComputeNodesDeleteUser200Response
-    | ComputeNodesDeleteUserDefaultResponse
-): response is ComputeNodesDeleteUserDefaultResponse;
+  response: ReimageNode202Response | ReimageNodeDefaultResponse
+): response is ReimageNodeDefaultResponse;
 export function isUnexpected(
   response:
-    | ComputeNodesUpdateUser200Response
-    | ComputeNodesUpdateUserDefaultResponse
-): response is ComputeNodesUpdateUserDefaultResponse;
+    | DisableNodeScheduling200Response
+    | DisableNodeSchedulingDefaultResponse
+): response is DisableNodeSchedulingDefaultResponse;
 export function isUnexpected(
   response:
-    | ComputeNodesGetComputeNode200Response
-    | ComputeNodesGetComputeNodeDefaultResponse
-): response is ComputeNodesGetComputeNodeDefaultResponse;
+    | EnableNodeScheduling200Response
+    | EnableNodeSchedulingDefaultResponse
+): response is EnableNodeSchedulingDefaultResponse;
 export function isUnexpected(
   response:
-    | ComputeNodesRebootComputeNode202Response
-    | ComputeNodesRebootComputeNodeDefaultResponse
-): response is ComputeNodesRebootComputeNodeDefaultResponse;
+    | GetNodeRemoteLoginSettings200Response
+    | GetNodeRemoteLoginSettingsDefaultResponse
+): response is GetNodeRemoteLoginSettingsDefaultResponse;
 export function isUnexpected(
   response:
-    | ComputeNodesReimageComputeNode202Response
-    | ComputeNodesReimageComputeNodeDefaultResponse
-): response is ComputeNodesReimageComputeNodeDefaultResponse;
+    | GetNodeRemoteDesktopFile200Response
+    | GetNodeRemoteDesktopFileDefaultResponse
+): response is GetNodeRemoteDesktopFileDefaultResponse;
 export function isUnexpected(
-  response:
-    | ComputeNodesDisableScheduling200Response
-    | ComputeNodesDisableSchedulingDefaultResponse
-): response is ComputeNodesDisableSchedulingDefaultResponse;
+  response: UploadNodeLogs200Response | UploadNodeLogsDefaultResponse
+): response is UploadNodeLogsDefaultResponse;
 export function isUnexpected(
-  response:
-    | ComputeNodesEnableScheduling200Response
-    | ComputeNodesEnableSchedulingDefaultResponse
-): response is ComputeNodesEnableSchedulingDefaultResponse;
+  response: ListNodes200Response | ListNodesDefaultResponse
+): response is ListNodesDefaultResponse;
 export function isUnexpected(
-  response:
-    | ComputeNodesGetRemoteLoginSettings200Response
-    | ComputeNodesGetRemoteLoginSettingsDefaultResponse
-): response is ComputeNodesGetRemoteLoginSettingsDefaultResponse;
+  response: GetNodeExtension200Response | GetNodeExtensionDefaultResponse
+): response is GetNodeExtensionDefaultResponse;
 export function isUnexpected(
-  response:
-    | ComputeNodesGetRemoteDesktop200Response
-    | ComputeNodesGetRemoteDesktopDefaultResponse
-): response is ComputeNodesGetRemoteDesktopDefaultResponse;
+  response: ListNodeExtensions200Response | ListNodeExtensionsDefaultResponse
+): response is ListNodeExtensionsDefaultResponse;
 export function isUnexpected(
-  response:
-    | ComputeNodesUploadBatchServiceLogs200Response
-    | ComputeNodesUploadBatchServiceLogsDefaultResponse
-): response is ComputeNodesUploadBatchServiceLogsDefaultResponse;
+  response: DeleteNodeFile200Response | DeleteNodeFileDefaultResponse
+): response is DeleteNodeFileDefaultResponse;
 export function isUnexpected(
-  response: ComputeNodesList200Response | ComputeNodesListDefaultResponse
-): response is ComputeNodesListDefaultResponse;
+  response: GetNodeFile200Response | GetNodeFileDefaultResponse
+): response is GetNodeFileDefaultResponse;
 export function isUnexpected(
   response:
-    | ComputeNodeExtensionsGetComputeNodeExtensions200Response
-    | ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse
-): response is ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse;
+    | GetNodeFileProperties200Response
+    | GetNodeFilePropertiesDefaultResponse
+): response is GetNodeFilePropertiesDefaultResponse;
 export function isUnexpected(
-  response:
-    | ComputeNodeExtensionsListComputeNodeExtensions200Response
-    | ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse
-): response is ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse;
+  response: ListNodeFiles200Response | ListNodeFilesDefaultResponse
+): response is ListNodeFilesDefaultResponse;
 export function isUnexpected(
   response:
-    | ApplicationsListApplications200Response
-    | ApplicationsListApplicationsDefaultResponse
-    | ApplicationsGet200Response
-    | ApplicationsGetDefaultResponse
-    | PoolListUsageMetrics200Response
-    | PoolListUsageMetricsDefaultResponse
-    | PoolGetAllPoolLifetimeStatistics200Response
-    | PoolGetAllPoolLifetimeStatisticsDefaultResponse
-    | PoolAddPool201Response
-    | PoolAddPoolDefaultResponse
-    | PoolListPools200Response
-    | PoolListPoolsDefaultResponse
-    | PoolDeletePool202Response
-    | PoolDeletePoolDefaultResponse
+    | ListApplications200Response
+    | ListApplicationsDefaultResponse
+    | GetApplication200Response
+    | GetApplicationDefaultResponse
+    | ListPoolUsageMetrics200Response
+    | ListPoolUsageMetricsDefaultResponse
+    | CreatePool201Response
+    | CreatePoolDefaultResponse
+    | ListPools200Response
+    | ListPoolsDefaultResponse
+    | DeletePool202Response
+    | DeletePoolDefaultResponse
     | PoolExists200Response
     | PoolExists404Response
     | PoolExistsDefaultResponse
-    | PoolGetPool200Response
-    | PoolGetPoolDefaultResponse
-    | PoolPatchPool200Response
-    | PoolPatchPoolDefaultResponse
-    | PoolDisableAutoScale200Response
-    | PoolDisableAutoScaleDefaultResponse
-    | PoolEnableAutoScale200Response
-    | PoolEnableAutoScaleDefaultResponse
-    | PoolEvaluateAutoScale200Response
-    | PoolEvaluateAutoScaleDefaultResponse
-    | PoolResize202Response
-    | PoolResizeDefaultResponse
-    | PoolStopResize202Response
-    | PoolStopResizeDefaultResponse
-    | PoolUpdateProperties204Response
-    | PoolUpdatePropertiesDefaultResponse
-    | PoolRemoveNodes202Response
-    | PoolRemoveNodesDefaultResponse
-    | AccountListSupportedImages200Response
-    | AccountListSupportedImagesDefaultResponse
-    | AccountListPoolNodeCounts200Response
-    | AccountListPoolNodeCountsDefaultResponse
-    | JobGetAllJobLifetimeStatistics200Response
-    | JobGetAllJobLifetimeStatisticsDefaultResponse
-    | JobDeleteJob202Response
-    | JobDeleteJobDefaultResponse
-    | JobGetJob200Response
-    | JobGetJobDefaultResponse
-    | JobPatchJob200Response
-    | JobPatchJobDefaultResponse
-    | JobUpdateJob200Response
-    | JobUpdateJobDefaultResponse
-    | JobDisableJob202Response
-    | JobDisableJobDefaultResponse
-    | JobEnableJob202Response
-    | JobEnableJobDefaultResponse
-    | JobTerminateJob202Response
-    | JobTerminateJobDefaultResponse
-    | JobAddJob201Response
-    | JobAddJobDefaultResponse
-    | JobListJobs200Response
-    | JobListJobsDefaultResponse
-    | JobListFromJobSchedule200Response
-    | JobListFromJobScheduleDefaultResponse
-    | JobListPreparationAndReleaseTaskStatus200Response
-    | JobListPreparationAndReleaseTaskStatusDefaultResponse
-    | JobGetTaskCounts200Response
-    | JobGetTaskCountsDefaultResponse
-    | CertificatesAddCertificate201Response
-    | CertificatesAddCertificateDefaultResponse
-    | CertificatesListCertificates200Response
-    | CertificatesListCertificatesDefaultResponse
-    | CertificatesCancelCertificateDeletion204Response
-    | CertificatesCancelCertificateDeletionDefaultResponse
-    | CertificatesDeleteCertificate202Response
-    | CertificatesDeleteCertificateDefaultResponse
-    | CertificatesGetCertificate200Response
-    | CertificatesGetCertificateDefaultResponse
-    | FileDeleteFromTask200Response
-    | FileDeleteFromTaskDefaultResponse
-    | FileGetFromTask200Response
-    | FileGetFromTaskDefaultResponse
-    | FileGetPropertiesFromTask200Response
-    | FileGetPropertiesFromTaskDefaultResponse
-    | FileDeleteFromComputeNode200Response
-    | FileDeleteFromComputeNodeDefaultResponse
-    | FileGetFromComputeNode200Response
-    | FileGetFromComputeNodeDefaultResponse
-    | FileGetPropertiesFromComputeNode200Response
-    | FileGetPropertiesFromComputeNodeDefaultResponse
-    | FileListFromTask200Response
-    | FileListFromTaskDefaultResponse
-    | FileListFromComputeNode200Response
-    | FileListFromComputeNodeDefaultResponse
-    | JobScheduleJobScheduleExists200Response
-    | JobScheduleJobScheduleExists404Response
-    | JobScheduleJobScheduleExistsDefaultResponse
-    | JobScheduleDeleteJobSchedule202Response
-    | JobScheduleDeleteJobScheduleDefaultResponse
-    | JobScheduleGetJobSchedule200Response
-    | JobScheduleGetJobScheduleDefaultResponse
-    | JobSchedulePatchJobSchedule200Response
-    | JobSchedulePatchJobScheduleDefaultResponse
-    | JobScheduleUpdateJobSchedule200Response
-    | JobScheduleUpdateJobScheduleDefaultResponse
-    | JobScheduleDisableJobSchedule204Response
-    | JobScheduleDisableJobScheduleDefaultResponse
-    | JobScheduleEnableJobSchedule204Response
-    | JobScheduleEnableJobScheduleDefaultResponse
-    | JobScheduleTerminateJobSchedule202Response
-    | JobScheduleTerminateJobScheduleDefaultResponse
-    | JobScheduleAddJobSchedule201Response
-    | JobScheduleAddJobScheduleDefaultResponse
-    | JobScheduleListJobSchedules200Response
-    | JobScheduleListJobSchedulesDefaultResponse
-    | TaskAddTask201Response
-    | TaskAddTaskDefaultResponse
-    | TaskListTasks200Response
-    | TaskListTasksDefaultResponse
-    | TaskAddTaskCollection200Response
-    | TaskAddTaskCollectionDefaultResponse
-    | TaskDeleteTaskCollection200Response
-    | TaskDeleteTaskCollectionDefaultResponse
-    | TaskGetTaskCollection200Response
-    | TaskGetTaskCollectionDefaultResponse
-    | TaskUpdateTaskCollection200Response
-    | TaskUpdateTaskCollectionDefaultResponse
-    | TaskListSubtasks200Response
-    | TaskListSubtasksDefaultResponse
-    | TaskTerminateTaskCollection204Response
-    | TaskTerminateTaskCollectionDefaultResponse
-    | TaskReactivateTaskCollection204Response
-    | TaskReactivateTaskCollectionDefaultResponse
-    | ComputeNodesAddUser201Response
-    | ComputeNodesAddUserDefaultResponse
-    | ComputeNodesDeleteUser200Response
-    | ComputeNodesDeleteUserDefaultResponse
-    | ComputeNodesUpdateUser200Response
-    | ComputeNodesUpdateUserDefaultResponse
-    | ComputeNodesGetComputeNode200Response
-    | ComputeNodesGetComputeNodeDefaultResponse
-    | ComputeNodesRebootComputeNode202Response
-    | ComputeNodesRebootComputeNodeDefaultResponse
-    | ComputeNodesReimageComputeNode202Response
-    | ComputeNodesReimageComputeNodeDefaultResponse
-    | ComputeNodesDisableScheduling200Response
-    | ComputeNodesDisableSchedulingDefaultResponse
-    | ComputeNodesEnableScheduling200Response
-    | ComputeNodesEnableSchedulingDefaultResponse
-    | ComputeNodesGetRemoteLoginSettings200Response
-    | ComputeNodesGetRemoteLoginSettingsDefaultResponse
-    | ComputeNodesGetRemoteDesktop200Response
-    | ComputeNodesGetRemoteDesktopDefaultResponse
-    | ComputeNodesUploadBatchServiceLogs200Response
-    | ComputeNodesUploadBatchServiceLogsDefaultResponse
-    | ComputeNodesList200Response
-    | ComputeNodesListDefaultResponse
-    | ComputeNodeExtensionsGetComputeNodeExtensions200Response
-    | ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse
-    | ComputeNodeExtensionsListComputeNodeExtensions200Response
-    | ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse
+    | GetPool200Response
+    | GetPoolDefaultResponse
+    | UpdatePool200Response
+    | UpdatePoolDefaultResponse
+    | DisablePoolAutoScale200Response
+    | DisablePoolAutoScaleDefaultResponse
+    | EnablePoolAutoScale200Response
+    | EnablePoolAutoScaleDefaultResponse
+    | EvaluatePoolAutoScale200Response
+    | EvaluatePoolAutoScaleDefaultResponse
+    | ResizePool202Response
+    | ResizePoolDefaultResponse
+    | StopPoolResize202Response
+    | StopPoolResizeDefaultResponse
+    | ReplacePoolProperties204Response
+    | ReplacePoolPropertiesDefaultResponse
+    | RemoveNodes202Response
+    | RemoveNodesDefaultResponse
+    | ListSupportedImages200Response
+    | ListSupportedImagesDefaultResponse
+    | ListPoolNodeCounts200Response
+    | ListPoolNodeCountsDefaultResponse
+    | DeleteJob202Response
+    | DeleteJobDefaultResponse
+    | GetJob200Response
+    | GetJobDefaultResponse
+    | UpdateJob200Response
+    | UpdateJobDefaultResponse
+    | ReplaceJob200Response
+    | ReplaceJobDefaultResponse
+    | DisableJob202Response
+    | DisableJobDefaultResponse
+    | EnableJob202Response
+    | EnableJobDefaultResponse
+    | TerminateJob202Response
+    | TerminateJobDefaultResponse
+    | CreateJob201Response
+    | CreateJobDefaultResponse
+    | ListJobs200Response
+    | ListJobsDefaultResponse
+    | ListJobsFromSchedule200Response
+    | ListJobsFromScheduleDefaultResponse
+    | ListJobPreparationAndReleaseTaskStatus200Response
+    | ListJobPreparationAndReleaseTaskStatusDefaultResponse
+    | GetJobTaskCounts200Response
+    | GetJobTaskCountsDefaultResponse
+    | CreateCertificate201Response
+    | CreateCertificateDefaultResponse
+    | ListCertificates200Response
+    | ListCertificatesDefaultResponse
+    | CancelCertificateDeletion204Response
+    | CancelCertificateDeletionDefaultResponse
+    | DeleteCertificate202Response
+    | DeleteCertificateDefaultResponse
+    | GetCertificate200Response
+    | GetCertificateDefaultResponse
+    | JobScheduleExists200Response
+    | JobScheduleExists404Response
+    | JobScheduleExistsDefaultResponse
+    | DeleteJobSchedule202Response
+    | DeleteJobScheduleDefaultResponse
+    | GetJobSchedule200Response
+    | GetJobScheduleDefaultResponse
+    | UpdateJobSchedule200Response
+    | UpdateJobScheduleDefaultResponse
+    | ReplaceJobSchedule200Response
+    | ReplaceJobScheduleDefaultResponse
+    | DisableJobSchedule204Response
+    | DisableJobScheduleDefaultResponse
+    | EnableJobSchedule204Response
+    | EnableJobScheduleDefaultResponse
+    | TerminateJobSchedule202Response
+    | TerminateJobScheduleDefaultResponse
+    | CreateJobSchedule201Response
+    | CreateJobScheduleDefaultResponse
+    | ListJobSchedules200Response
+    | ListJobSchedulesDefaultResponse
+    | CreateTask201Response
+    | CreateTaskDefaultResponse
+    | ListTasks200Response
+    | ListTasksDefaultResponse
+    | CreateTaskCollection200Response
+    | CreateTaskCollectionDefaultResponse
+    | DeleteTask200Response
+    | DeleteTaskDefaultResponse
+    | GetTask200Response
+    | GetTaskDefaultResponse
+    | ReplaceTask200Response
+    | ReplaceTaskDefaultResponse
+    | ListSubTasks200Response
+    | ListSubTasksDefaultResponse
+    | TerminateTask204Response
+    | TerminateTaskDefaultResponse
+    | ReactivateTask204Response
+    | ReactivateTaskDefaultResponse
+    | DeleteTaskFile200Response
+    | DeleteTaskFileDefaultResponse
+    | GetTaskFile200Response
+    | GetTaskFileDefaultResponse
+    | GetTaskFileProperties200Response
+    | GetTaskFilePropertiesDefaultResponse
+    | ListTaskFiles200Response
+    | ListTaskFilesDefaultResponse
+    | CreateNodeUser201Response
+    | CreateNodeUserDefaultResponse
+    | DeleteNodeUser200Response
+    | DeleteNodeUserDefaultResponse
+    | ReplaceNodeUser200Response
+    | ReplaceNodeUserDefaultResponse
+    | GetNode200Response
+    | GetNodeDefaultResponse
+    | RebootNode202Response
+    | RebootNodeDefaultResponse
+    | ReimageNode202Response
+    | ReimageNodeDefaultResponse
+    | DisableNodeScheduling200Response
+    | DisableNodeSchedulingDefaultResponse
+    | EnableNodeScheduling200Response
+    | EnableNodeSchedulingDefaultResponse
+    | GetNodeRemoteLoginSettings200Response
+    | GetNodeRemoteLoginSettingsDefaultResponse
+    | GetNodeRemoteDesktopFile200Response
+    | GetNodeRemoteDesktopFileDefaultResponse
+    | UploadNodeLogs200Response
+    | UploadNodeLogsDefaultResponse
+    | ListNodes200Response
+    | ListNodesDefaultResponse
+    | GetNodeExtension200Response
+    | GetNodeExtensionDefaultResponse
+    | ListNodeExtensions200Response
+    | ListNodeExtensionsDefaultResponse
+    | DeleteNodeFile200Response
+    | DeleteNodeFileDefaultResponse
+    | GetNodeFile200Response
+    | GetNodeFileDefaultResponse
+    | GetNodeFileProperties200Response
+    | GetNodeFilePropertiesDefaultResponse
+    | ListNodeFiles200Response
+    | ListNodeFilesDefaultResponse
 ): response is
-  | ApplicationsListApplicationsDefaultResponse
-  | ApplicationsGetDefaultResponse
-  | PoolListUsageMetricsDefaultResponse
-  | PoolGetAllPoolLifetimeStatisticsDefaultResponse
-  | PoolAddPoolDefaultResponse
-  | PoolListPoolsDefaultResponse
-  | PoolDeletePoolDefaultResponse
+  | ListApplicationsDefaultResponse
+  | GetApplicationDefaultResponse
+  | ListPoolUsageMetricsDefaultResponse
+  | CreatePoolDefaultResponse
+  | ListPoolsDefaultResponse
+  | DeletePoolDefaultResponse
   | PoolExistsDefaultResponse
-  | PoolGetPoolDefaultResponse
-  | PoolPatchPoolDefaultResponse
-  | PoolDisableAutoScaleDefaultResponse
-  | PoolEnableAutoScaleDefaultResponse
-  | PoolEvaluateAutoScaleDefaultResponse
-  | PoolResizeDefaultResponse
-  | PoolStopResizeDefaultResponse
-  | PoolUpdatePropertiesDefaultResponse
-  | PoolRemoveNodesDefaultResponse
-  | AccountListSupportedImagesDefaultResponse
-  | AccountListPoolNodeCountsDefaultResponse
-  | JobGetAllJobLifetimeStatisticsDefaultResponse
-  | JobDeleteJobDefaultResponse
-  | JobGetJobDefaultResponse
-  | JobPatchJobDefaultResponse
-  | JobUpdateJobDefaultResponse
-  | JobDisableJobDefaultResponse
-  | JobEnableJobDefaultResponse
-  | JobTerminateJobDefaultResponse
-  | JobAddJobDefaultResponse
-  | JobListJobsDefaultResponse
-  | JobListFromJobScheduleDefaultResponse
-  | JobListPreparationAndReleaseTaskStatusDefaultResponse
-  | JobGetTaskCountsDefaultResponse
-  | CertificatesAddCertificateDefaultResponse
-  | CertificatesListCertificatesDefaultResponse
-  | CertificatesCancelCertificateDeletionDefaultResponse
-  | CertificatesDeleteCertificateDefaultResponse
-  | CertificatesGetCertificateDefaultResponse
-  | FileDeleteFromTaskDefaultResponse
-  | FileGetFromTaskDefaultResponse
-  | FileGetPropertiesFromTaskDefaultResponse
-  | FileDeleteFromComputeNodeDefaultResponse
-  | FileGetFromComputeNodeDefaultResponse
-  | FileGetPropertiesFromComputeNodeDefaultResponse
-  | FileListFromTaskDefaultResponse
-  | FileListFromComputeNodeDefaultResponse
-  | JobScheduleJobScheduleExistsDefaultResponse
-  | JobScheduleDeleteJobScheduleDefaultResponse
-  | JobScheduleGetJobScheduleDefaultResponse
-  | JobSchedulePatchJobScheduleDefaultResponse
-  | JobScheduleUpdateJobScheduleDefaultResponse
-  | JobScheduleDisableJobScheduleDefaultResponse
-  | JobScheduleEnableJobScheduleDefaultResponse
-  | JobScheduleTerminateJobScheduleDefaultResponse
-  | JobScheduleAddJobScheduleDefaultResponse
-  | JobScheduleListJobSchedulesDefaultResponse
-  | TaskAddTaskDefaultResponse
-  | TaskListTasksDefaultResponse
-  | TaskAddTaskCollectionDefaultResponse
-  | TaskDeleteTaskCollectionDefaultResponse
-  | TaskGetTaskCollectionDefaultResponse
-  | TaskUpdateTaskCollectionDefaultResponse
-  | TaskListSubtasksDefaultResponse
-  | TaskTerminateTaskCollectionDefaultResponse
-  | TaskReactivateTaskCollectionDefaultResponse
-  | ComputeNodesAddUserDefaultResponse
-  | ComputeNodesDeleteUserDefaultResponse
-  | ComputeNodesUpdateUserDefaultResponse
-  | ComputeNodesGetComputeNodeDefaultResponse
-  | ComputeNodesRebootComputeNodeDefaultResponse
-  | ComputeNodesReimageComputeNodeDefaultResponse
-  | ComputeNodesDisableSchedulingDefaultResponse
-  | ComputeNodesEnableSchedulingDefaultResponse
-  | ComputeNodesGetRemoteLoginSettingsDefaultResponse
-  | ComputeNodesGetRemoteDesktopDefaultResponse
-  | ComputeNodesUploadBatchServiceLogsDefaultResponse
-  | ComputeNodesListDefaultResponse
-  | ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse
-  | ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse {
+  | GetPoolDefaultResponse
+  | UpdatePoolDefaultResponse
+  | DisablePoolAutoScaleDefaultResponse
+  | EnablePoolAutoScaleDefaultResponse
+  | EvaluatePoolAutoScaleDefaultResponse
+  | ResizePoolDefaultResponse
+  | StopPoolResizeDefaultResponse
+  | ReplacePoolPropertiesDefaultResponse
+  | RemoveNodesDefaultResponse
+  | ListSupportedImagesDefaultResponse
+  | ListPoolNodeCountsDefaultResponse
+  | DeleteJobDefaultResponse
+  | GetJobDefaultResponse
+  | UpdateJobDefaultResponse
+  | ReplaceJobDefaultResponse
+  | DisableJobDefaultResponse
+  | EnableJobDefaultResponse
+  | TerminateJobDefaultResponse
+  | CreateJobDefaultResponse
+  | ListJobsDefaultResponse
+  | ListJobsFromScheduleDefaultResponse
+  | ListJobPreparationAndReleaseTaskStatusDefaultResponse
+  | GetJobTaskCountsDefaultResponse
+  | CreateCertificateDefaultResponse
+  | ListCertificatesDefaultResponse
+  | CancelCertificateDeletionDefaultResponse
+  | DeleteCertificateDefaultResponse
+  | GetCertificateDefaultResponse
+  | JobScheduleExistsDefaultResponse
+  | DeleteJobScheduleDefaultResponse
+  | GetJobScheduleDefaultResponse
+  | UpdateJobScheduleDefaultResponse
+  | ReplaceJobScheduleDefaultResponse
+  | DisableJobScheduleDefaultResponse
+  | EnableJobScheduleDefaultResponse
+  | TerminateJobScheduleDefaultResponse
+  | CreateJobScheduleDefaultResponse
+  | ListJobSchedulesDefaultResponse
+  | CreateTaskDefaultResponse
+  | ListTasksDefaultResponse
+  | CreateTaskCollectionDefaultResponse
+  | DeleteTaskDefaultResponse
+  | GetTaskDefaultResponse
+  | ReplaceTaskDefaultResponse
+  | ListSubTasksDefaultResponse
+  | TerminateTaskDefaultResponse
+  | ReactivateTaskDefaultResponse
+  | DeleteTaskFileDefaultResponse
+  | GetTaskFileDefaultResponse
+  | GetTaskFilePropertiesDefaultResponse
+  | ListTaskFilesDefaultResponse
+  | CreateNodeUserDefaultResponse
+  | DeleteNodeUserDefaultResponse
+  | ReplaceNodeUserDefaultResponse
+  | GetNodeDefaultResponse
+  | RebootNodeDefaultResponse
+  | ReimageNodeDefaultResponse
+  | DisableNodeSchedulingDefaultResponse
+  | EnableNodeSchedulingDefaultResponse
+  | GetNodeRemoteLoginSettingsDefaultResponse
+  | GetNodeRemoteDesktopFileDefaultResponse
+  | UploadNodeLogsDefaultResponse
+  | ListNodesDefaultResponse
+  | GetNodeExtensionDefaultResponse
+  | ListNodeExtensionsDefaultResponse
+  | DeleteNodeFileDefaultResponse
+  | GetNodeFileDefaultResponse
+  | GetNodeFilePropertiesDefaultResponse
+  | ListNodeFilesDefaultResponse {
   const lroOriginal = response.headers["x-ms-original-url"];
   const url = new URL(lroOriginal ?? response.request.url);
   const method = response.request.method;
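
Reviewer note (not part of the generated diff): the change above flattens the old operation-group prefixes (JobSchedule*, Task*, ComputeNodes*, ...) into verb-first names, but the call pattern for the guard is unchanged. A minimal usage sketch under stated assumptions: the default-exported createClient factory and the "/pools/{poolId}" path come from this generated Batch client, and the endpoint value and "./src/index" entry point are placeholders.

    import createClient, { isUnexpected } from "./src/index"; // assumed entry point
    import { DefaultAzureCredential } from "@azure/identity";

    async function main(): Promise<void> {
      // Hypothetical Batch account endpoint; substitute a real one.
      const client = createClient(
        "https://contoso.eastus.batch.azure.com",
        new DefaultAzureCredential(),
      );
      const response = await client.path("/pools/{poolId}", "my-pool").get();
      if (isUnexpected(response)) {
        // Narrowed to GetPoolDefaultResponse (previously PoolGetPoolDefaultResponse).
        throw new Error(`GetPool failed with status ${response.status}`);
      }
      // Narrowed to GetPool200Response (previously PoolGetPool200Response).
      console.log(response.body.id);
    }

    main().catch(console.error);
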
diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/models.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/models.ts
index 0825de8eae..4f9821ce62 100644
--- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/models.ts
+++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/models.ts
@@ -1,203 +1,59 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT license.

-/** Contains utilization and resource usage statistics for the lifetime of a Pool. */
-export interface PoolStatistics {
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date | string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: Date | string;
-  /** Statistics related to Pool usage information. */
-  usageStats?: UsageStatistics;
-  /** Statistics related to resource consumption by Compute Nodes in a Pool. */
-  resourceStats?: ResourceStatistics;
-}
-
-/** Statistics related to Pool usage information. */
-export interface UsageStatistics {
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date | string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: Date | string;
-  /**
-   * The aggregated wall-clock time of the dedicated Compute Node cores being part
-   * of the Pool.
-   */
-  dedicatedCoreTime: string;
-}
-
-/** Statistics related to resource consumption by Compute Nodes in a Pool. */
-export interface ResourceStatistics {
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date | string;
-  /**
-   * The time at which the statistics were last updated. All statistics are limited
-   * to the range between startTime and lastUpdateTime.
-   */
-  lastUpdateTime: Date | string;
-  /**
-   * The average CPU usage across all Compute Nodes in the Pool (percentage per
-   * node).
-   */
-  avgCPUPercentage: number;
-  /** The average memory usage in GiB across all Compute Nodes in the Pool. */
-  avgMemoryGiB: number;
-  /** The peak memory usage in GiB across all Compute Nodes in the Pool. */
-  peakMemoryGiB: number;
-  /** The average used disk space in GiB across all Compute Nodes in the Pool. */
-  avgDiskGiB: number;
-  /** The peak used disk space in GiB across all Compute Nodes in the Pool. */
-  peakDiskGiB: number;
-  /** The total number of disk read operations across all Compute Nodes in the Pool. */
-  diskReadIOps: number;
-  /** The total number of disk write operations across all Compute Nodes in the Pool. */
-  diskWriteIOps: number;
-  /**
-   * The total amount of data in GiB of disk reads across all Compute Nodes in the
-   * Pool.
-   */
-  diskReadGiB: number;
-  /**
-   * The total amount of data in GiB of disk writes across all Compute Nodes in the
-   * Pool.
-   */
-  diskWriteGiB: number;
-  /**
-   * The total amount of data in GiB of network reads across all Compute Nodes in
-   * the Pool.
-   */
-  networkReadGiB: number;
-  /**
-   * The total amount of data in GiB of network writes across all Compute Nodes in
-   * the Pool.
-   */
-  networkWriteGiB: number;
-}
-
-/** A Pool in the Azure Batch service. */
-export interface BatchPool {
-  /**
-   * The ID can contain any combination of alphanumeric characters including hyphens
-   * and underscores, and cannot contain more than 64 characters. The ID is
-   * case-preserving and case-insensitive (that is, you may not have two IDs within
-   * an Account that differ only by case).
-   */
-  id?: string;
-  /**
-   * The display name need not be unique and can contain any Unicode characters up
-   * to a maximum length of 1024.
-   */
+/** Options for creating an Azure Batch Pool. */
+export interface BatchPoolCreateOptions {
+  /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). */
+  id: string;
+  /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
   displayName?: string;
-  /**
-   * For information about available sizes of virtual machines in Pools, see Choose
-   * a VM size for Compute Nodes in an Azure Batch Pool
-   * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
-   */
-  vmSize?: string;
-  /**
-   * This property and virtualMachineConfiguration are mutually exclusive and one of
-   * the properties must be specified. This property cannot be specified if the
-   * Batch Account was created with its poolAllocationMode property set to
-   * 'UserSubscription'.
-   */
+  /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines for Cloud Services Pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and A2V2. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
+  vmSize: string;
+  /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */
   cloudServiceConfiguration?: CloudServiceConfiguration;
-  /**
-   * This property and cloudServiceConfiguration are mutually exclusive and one of
-   * the properties must be specified.
-   */
+  /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. */
   virtualMachineConfiguration?: VirtualMachineConfiguration;
-  /**
-   * This is the timeout for the most recent resize operation. (The initial sizing
-   * when the Pool is created counts as a resize.) The default value is 15 minutes.
-   */
+  /** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
   resizeTimeout?: string;
-  /** The desired number of dedicated Compute Nodes in the Pool. */
+  /** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
   targetDedicatedNodes?: number;
-  /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */
+  /** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
  targetLowPriorityNodes?: number;
-  /**
-   * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must
-   * be specified. If true, the autoScaleFormula property is required and the Pool
-   * automatically resizes according to the formula. The default value is false.
-   */
+  /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */
   enableAutoScale?: boolean;
-  /**
-   * This property is set only if the Pool automatically scales, i.e.
-   * enableAutoScale is true.
-   */
+  /** A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). */
   autoScaleFormula?: string;
-  /**
-   * This property is set only if the Pool automatically scales, i.e.
-   * enableAutoScale is true.
-   */
+  /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
   autoScaleEvaluationInterval?: string;
-  /**
-   * This imposes restrictions on which Compute Nodes can be assigned to the Pool.
-   * Specifying this value can reduce the chance of the requested number of Compute
-   * Nodes to be allocated in the Pool.
-   */
+  /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */
   enableInterNodeCommunication?: boolean;
-  /** The network configuration for a Pool. */
+  /** The network configuration for the Pool. */
   networkConfiguration?: NetworkConfiguration;
-  /**
-   * Batch will retry Tasks when a recovery operation is triggered on a Node.
-   * Examples of recovery operations include (but are not limited to) when an
-   * unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
-   * Retries due to recovery operations are independent of and are not counted
-   * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
-   * retry due to a recovery operation may occur. Because of this, all Tasks should
-   * be idempotent. This means Tasks need to tolerate being interrupted and
-   * restarted without causing any corruption or duplicate data. The best practice
-   * for long running Tasks is to use some form of checkpointing. In some cases the
-   * StartTask may be re-run even though the Compute Node was not rebooted. Special
-   * care should be taken to avoid StartTasks which create breakaway process or
-   * install/launch services from the StartTask working directory, as this will
-   * block Batch from being able to re-run the StartTask.
-   */
+  /** A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. */
   startTask?: StartTask;
   /**
-   * For Windows Nodes, the Batch service installs the Certificates to the specified
-   * Certificate store and location. For Linux Compute Nodes, the Certificates are
-   * stored in a directory inside the Task working directory and an environment
-   * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
-   * location. For Certificates with visibility of 'remoteUser', a 'certs' directory
-   * is created in the user's home directory (e.g., /home/{user-name}/certs) and
-   * Certificates are placed in that directory.
+   * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location.
+   * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location.
+   * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+   * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
    */
   certificateReferences?: Array<CertificateReference>;
-  /**
-   * Changes to Package references affect all new Nodes joining the Pool, but do not
-   * affect Compute Nodes that are already in the Pool until they are rebooted or
-   * reimaged. There is a maximum of 10 Package references on any given Pool.
-   */
+  /** The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */
   applicationPackageReferences?: Array<ApplicationPackageReference>;
-  /**
-   * The list of application licenses must be a subset of available Batch service
-   * application licenses. If a license is requested which is not supported, Pool
-   * creation will fail.
-   */
+  /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. */
   applicationLicenses?: string[];
-  /**
-   * The default value is 1. The maximum value is the smaller of 4 times the number
-   * of cores of the vmSize of the pool or 256.
-   */
+  /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */
   taskSlotsPerNode?: number;
-  /** If not specified, the default is spread. */
+  /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */
   taskSchedulingPolicy?: TaskSchedulingPolicy;
   /** The list of user Accounts to be created on each Compute Node in the Pool. */
   userAccounts?: Array<UserAccount>;
-  /** A list of name-value pairs associated with the Pool as metadata. */
+  /** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
   metadata?: Array<MetadataItem>;
-  /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */
+  /** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */
   mountConfiguration?: Array<MountConfiguration>;
   /**
-   * If omitted, the default value is Default.
+   * The desired node communication mode for the pool. If omitted, the default value is Default.
    *
    * Possible values: default, classic, simplified
   */
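
Reviewer note (not part of the generated diff): BatchPoolCreateOptions now requires id and vmSize, which were optional on the old BatchPool shape. A minimal literal under those constraints; the import path, image reference, and node-agent SKU values are illustrative placeholders, not values fixed by this PR.

    import type { BatchPoolCreateOptions } from "./src/rest/models"; // assumed path

    const pool: BatchPoolCreateOptions = {
      id: "example-pool", // required: unique within the Account
      vmSize: "STANDARD_D2_V3", // required: one VM size per Pool
      targetDedicatedNodes: 2, // manual scaling, so enableAutoScale defaults to false
      virtualMachineConfiguration: {
        // imageReference and nodeAgentSKUId are the two required fields here.
        imageReference: {
          publisher: "Canonical",
          offer: "UbuntuServer",
          sku: "18.04-LTS",
          version: "latest",
        },
        nodeAgentSKUId: "batch.node.ubuntu 18.04",
      },
    };
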
The disk is - * not formatted after being attached, it must be formatted before use - for more - * information see - * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux - * and - * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. - */ + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ dataDisks?: Array; /** * This only applies to Images that contain the Windows operating system, and @@ -281,29 +108,15 @@ export interface VirtualMachineConfiguration { * */ licenseType?: string; - /** - * If specified, setup is performed on each Compute Node in the Pool to allow - * Tasks to run in containers. All regular Tasks and Job manager Tasks run on this - * Pool must specify the containerSettings property, and all other Tasks may - * specify it. - */ + /** The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. */ containerConfiguration?: ContainerConfiguration; - /** - * If specified, encryption is performed on each node in the pool during node - * provisioning. - */ + /** The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. */ diskEncryptionConfiguration?: DiskEncryptionConfiguration; - /** - * This configuration will specify rules on how nodes in the pool will be - * physically allocated. - */ + /** The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. */ nodePlacementConfiguration?: NodePlacementConfiguration; - /** - * If specified, the extensions mentioned in this configuration will be installed - * on each node. - */ + /** The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. */ extensions?: Array; - /** Settings for the operating system disk of the compute node (VM). */ + /** Settings for the operating system disk of the Virtual Machine. */ osDisk?: OSDisk; } @@ -313,32 +126,21 @@ export interface VirtualMachineConfiguration { * verified by Azure Batch, see the 'List Supported Images' operation. */ export interface ImageReference { - /** For example, Canonical or MicrosoftWindowsServer. */ + /** The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. */ publisher?: string; - /** For example, UbuntuServer or WindowsServer. 
*/ + /** The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. */ offer?: string; - /** For example, 18.04-LTS or 2019-Datacenter. */ + /** The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. */ sku?: string; - /** - * A value of 'latest' can be specified to select the latest version of an Image. - * If omitted, the default is 'latest'. - */ + /** The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. */ version?: string; - /** - * This property is mutually exclusive with other ImageReference properties. The - * Shared Image Gallery Image must have replicas in the same region and must be in - * the same subscription as the Azure Batch account. If the image version is not - * specified in the imageId, the latest version will be used. For information - * about the firewall settings for the Batch Compute Node agent to communicate - * with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - */ + /** The ARM resource identifier of the Shared Image Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Shared Image Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ virtualMachineImageId?: string; } /** Windows operating system settings to apply to the virtual machine. */ export interface WindowsConfiguration { - /** If omitted, the default value is true. */ + /** Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true. */ enableAutomaticUpdates?: boolean; } @@ -348,16 +150,10 @@ export interface WindowsConfiguration { * disks from within a VM to use them. */ export interface DataDisk { - /** - * The lun is used to uniquely identify each data disk. If attaching multiple - * disks, each should have a distinct lun. The value must be between 0 and 63, - * inclusive. - */ + /** The logical unit number. The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive. */ lun: number; /** - * The default value for caching is readwrite. For information about the caching - * options see: - * https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + * The type of caching to be enabled for the data disks. The default value for caching is readwrite. 
For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. * * Possible values: none, readonly, readwrite */ @@ -365,7 +161,7 @@ export interface DataDisk { /** The initial disk size in gigabytes. */ diskSizeGB: number; /** - * If omitted, the default is "standard_lrs". + * The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". * * Possible values: standard_lrs, premium_lrs */ @@ -377,19 +173,12 @@ export interface ContainerConfiguration { /** * The container technology to be used. * - * Possible values: dockerCompatible + * Possible values: dockerCompatible, criCompatible */ type: string; - /** - * This is the full Image reference, as would be specified to "docker pull". An - * Image will be sourced from the default Docker registry unless the Image is - * fully qualified with an alternative registry. - */ + /** The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ containerImageNames?: string[]; - /** - * If any Images must be downloaded from a private registry which requires - * credentials, then those credentials must be provided here. - */ + /** Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. */ containerRegistries?: Array; } @@ -399,20 +188,17 @@ export interface ContainerRegistry { username?: string; /** The password to log into the registry server. */ password?: string; - /** If omitted, the default is "docker.io". */ + /** The registry URL. If omitted, the default is "docker.io". */ registryServer?: string; - /** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ - identityReference?: ComputeNodeIdentityReference; + /** The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. */ + identityReference?: BatchNodeIdentityReference; } /** * The reference to a user assigned identity associated with the Batch pool which * a compute node will use. */ -export interface ComputeNodeIdentityReference { +export interface BatchNodeIdentityReference { /** The ARM resource id of the user assigned identity. */ resourceId?: string; } @@ -423,11 +209,7 @@ export interface ComputeNodeIdentityReference { * Image Gallery Image. */ export interface DiskEncryptionConfiguration { - /** - * If omitted, no disks on the compute nodes in the pool will be encrypted. On - * Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" - * and "TemporaryDisk" must be specified. - */ + /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */ targets?: string[]; } @@ -438,8 +220,7 @@ export interface DiskEncryptionConfiguration { */ export interface NodePlacementConfiguration { /** - * Allocation policy used by Batch Service to provision the nodes. If not - * specified, Batch will use the regional policy. + * Node placement Policy type on Batch Pools. 
Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. * * Possible values: regional, zonal */ @@ -456,34 +237,21 @@ export interface VMExtension { type: string; /** The version of script handler. */ typeHandlerVersion?: string; - /** - * Indicates whether the extension should use a newer minor version if one is - * available at deployment time. Once deployed, however, the extension will not - * upgrade minor versions unless redeployed, even with this property set to true. - */ + /** Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. */ autoUpgradeMinorVersion?: boolean; + /** Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available. */ + enableAutomaticUpgrade?: boolean; /** JSON formatted public settings for the extension. */ - settings?: Object; - /** - * The extension can contain either protectedSettings or - * protectedSettingsFromKeyVault or no protected settings at all. - */ - protectedSettings?: Object; - /** - * Collection of extension names after which this extension needs to be - * provisioned. - */ + settings?: Record; + /** The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. */ + protectedSettings?: Record; + /** The collection of extension names. Collection of extension names after which this extension needs to be provisioned. */ provisionAfterExtensions?: string[]; } -export interface Object {} - /** Settings for the operating system disk of the compute node (VM). */ export interface OSDisk { - /** - * Specifies the ephemeral Disk Settings for the operating system disk used by the - * compute node (VM). - */ + /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */ ephemeralOSDiskSettings?: DiffDiskSettings; } @@ -493,96 +261,16 @@ export interface OSDisk { */ export interface DiffDiskSettings { /** - * This property can be used by user in the request to choose the location e.g., - * cache disk space for Ephemeral OS disk provisioning. For more information on - * Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size - * requirements for Windows VMs at - * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - * and Linux VMs at - * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. * * Possible values: cachedisk */ placement?: string; } -/** An error that occurred when resizing a Pool. */ -export interface ResizeError { - /** - * An identifier for the Pool resize error. Codes are invariant and are intended - * to be consumed programmatically. 
-   */
-  code?: string;
-  /**
-   * A message describing the Pool resize error, intended to be suitable for display
-   * in a user interface.
-   */
-  message?: string;
-  /** A list of additional error details related to the Pool resize error. */
-  values?: Array<NameValuePair>;
-}
-
-/** Represents a name-value pair. */
-export interface NameValuePair {
-  /** The name in the name-value pair. */
-  name?: string;
-  /** The value in the name-value pair. */
-  value?: string;
-}
-
-/** The results and errors from an execution of a Pool autoscale formula. */
-export interface AutoScaleRun {
-  /**
-   * Each variable value is returned in the form $variable=value, and variables are
-   * separated by semicolons.
-   */
-  results?: string;
-  /** An error that occurred when executing or evaluating a Pool autoscale formula. */
-  error?: AutoScaleRunError;
-}
-
-/** An error that occurred when executing or evaluating a Pool autoscale formula. */
-export interface AutoScaleRunError {
-  /**
-   * An identifier for the autoscale error. Codes are invariant and are intended to
-   * be consumed programmatically.
-   */
-  code?: string;
-  /**
-   * A message describing the autoscale error, intended to be suitable for display
-   * in a user interface.
-   */
-  message?: string;
-  /** A list of additional error details related to the autoscale error. */
-  values?: Array<NameValuePair>;
-}
-
 /** The network configuration for a Pool. */
 export interface NetworkConfiguration {
-  /**
-   * The virtual network must be in the same region and subscription as the Azure
-   * Batch Account. The specified subnet should have enough free IP addresses to
-   * accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have
-   * enough free IP addresses, the Pool will partially allocate Nodes and a resize
-   * error will occur. The 'MicrosoftAzureBatch' service principal must have the
-   * 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for
-   * the specified VNet. The specified subnet must allow communication from the
-   * Azure Batch service to be able to schedule Tasks on the Nodes. This can be
-   * verified by checking if the specified VNet has any associated Network Security
-   * Groups (NSG). If communication to the Nodes in the specified subnet is denied
-   * by an NSG, then the Batch service will set the state of the Compute Nodes to
-   * unusable. For Pools created with virtualMachineConfiguration only ARM virtual
-   * networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools
-   * created with cloudServiceConfiguration both ARM and classic virtual networks
-   * are supported. If the specified VNet has any associated Network Security Groups
-   * (NSG), then a few reserved system ports must be enabled for inbound
-   * communication. For Pools created with a virtual machine configuration, enable
-   * ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows.
-   * For Pools created with a cloud service configuration, enable ports 10100,
-   * 20100, and 30100. Also enable outbound connections to Azure Storage on port
-   * 443. For more details see:
-   * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
-   */
+  /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For Pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
   subnetId?: string;
   /**
    * The scope of dynamic vnet assignment.
@@ -590,26 +278,17 @@ export interface NetworkConfiguration {
    * Possible values: none, job
    */
   dynamicVNetAssignmentScope?: string;
-  /**
-   * Pool endpoint configuration is only supported on Pools with the
-   * virtualMachineConfiguration property.
-   */
+  /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */
   endpointConfiguration?: PoolEndpointConfiguration;
-  /**
-   * Public IP configuration property is only supported on Pools with the
-   * virtualMachineConfiguration property.
-   */
+  /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */
   publicIPAddressConfiguration?: PublicIPAddressConfiguration;
+  /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */
+  enableAcceleratedNetworking?: boolean;
 }
 
 /** The endpoint configuration for a Pool. */
 export interface PoolEndpointConfiguration {
-  /**
-   * The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum
-   * number of inbound NAT Pools is exceeded the request fails with HTTP status code
-   * 400. This cannot be specified if the IPAddressProvisioningType is
-   * NoPublicIPAddresses.
-   */
+  /** A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. */
   inboundNATPools: Array<InboundNATPool>;
 }
 
@@ -618,13 +297,7 @@ export interface PoolEndpointConfiguration {
  * in a Batch Pool externally.
  */
 export interface InboundNATPool {
-  /**
-   * The name must be unique within a Batch Pool, can contain letters, numbers,
-   * underscores, periods, and hyphens. Names must start with a letter or number,
-   * must end with a letter, number, or underscore, and cannot exceed 77 characters.
-   * If any invalid values are provided the request fails with HTTP status code
-   * 400.
-   */
+  /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */
   name: string;
   /**
    * The protocol of the endpoint.
@@ -632,47 +305,19 @@ export interface InboundNATPool {
    * Possible values: tcp, udp
    */
   protocol: string;
-  /**
-   * This must be unique within a Batch Pool. Acceptable values are between 1 and
-   * 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any
-   * reserved values are provided the request fails with HTTP status code 400.
-   */
+  /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */
   backendPort: number;
-  /**
-   * Acceptable values range between 1 and 65534 except ports from 50000 to 55000
-   * which are reserved. All ranges within a Pool must be distinct and cannot
-   * overlap. Each range must contain at least 40 ports. If any reserved or
-   * overlapping values are provided the request fails with HTTP status code 400.
-   */
+  /** The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */
   frontendPortRangeStart: number;
-  /**
-   * Acceptable values range between 1 and 65534 except ports from 50000 to 55000
-   * which are reserved by the Batch service. All ranges within a Pool must be
-   * distinct and cannot overlap. Each range must contain at least 40 ports. If any
-   * reserved or overlapping values are provided the request fails with HTTP status
-   * code 400.
-   */
+  /** The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */
   frontendPortRangeEnd: number;
-  /**
-   * The maximum number of rules that can be specified across all the endpoints on a
-   * Batch Pool is 25. If no network security group rules are specified, a default
-   * rule will be created to allow inbound access to the specified backendPort. If
-   * the maximum number of network security group rules is exceeded the request
-   * fails with HTTP status code 400.
-   */
+  /** A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. */
   networkSecurityGroupRules?: Array<NetworkSecurityGroupRule>;
 }
 
 /** A network security group rule to apply to an inbound endpoint. */
 export interface NetworkSecurityGroupRule {
-  /**
-   * Priorities within a Pool must be unique and are evaluated in order of priority.
-   * The lower the number the higher the priority. For example, rules could be
-   * specified with order numbers of 150, 250, and 350. The rule with the order
-   * number of 150 takes precedence over the rule that has an order of 250. Allowed
-   * priorities are 150 to 4096. If any reserved or duplicate values are provided
-   * the request fails with HTTP status code 400.
-   */
+  /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */
   priority: number;
   /**
    * The action that should be taken for a specified IP address, subnet range or tag.
@@ -680,37 +325,21 @@ export interface NetworkSecurityGroupRule {
    * Possible values: allow, deny
    */
   access: string;
-  /**
-   * Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e.
-   * 192.168.1.0/24), default tag, or * (for all addresses). If any other values
-   * are provided the request fails with HTTP status code 400.
-   */
+  /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */
   sourceAddressPrefix: string;
-  /**
-   * Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a
-   * port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each
-   * entry in this collection must not overlap any other entry (either a range or an
-   * individual port). If any other values are provided the request fails with HTTP
-   * status code 400. The default value is '*'.
-   */
+  /** The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'. */
   sourcePortRanges?: string[];
 }
 
 /** The public IP Address configuration of the networking configuration of a Pool. */
 export interface PublicIPAddressConfiguration {
   /**
-   * The default value is BatchManaged.
+   * The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged.
* * Possible values: batchmanaged, usermanaged, nopublicipaddresses */ provision?: string; - /** - * The number of IPs specified here limits the maximum size of the Pool - 100 - * dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public - * IP. For example, a pool needing 250 dedicated VMs would need at least 3 public - * IPs specified. Each element of this collection is of the form: - * /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - */ + /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ ipAddressIds?: string[]; } @@ -730,75 +359,32 @@ export interface PublicIPAddressConfiguration { * block Batch from being able to re-run the StartTask. */ export interface StartTask { - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ + /** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
*/ containerSettings?: TaskContainerSettings; - /** Files listed under this element are located in the Task's working directory. */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. */ resourceFiles?: Array; /** A list of environment variable settings for the StartTask. */ environmentSettings?: Array; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ + /** The user identity under which the StartTask runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ userIdentity?: UserIdentity; - /** - * The Batch service retries a Task if its exit code is nonzero. Note that this - * value specifically controls the number of retries. The Batch service will try - * the Task once, and may then retry up to this limit. For example, if the maximum - * retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 - * retries). If the maximum retry count is 0, the Batch service does not retry the - * Task. If the maximum retry count is -1, the Batch service retries the Task - * without limit, however this is not recommended for a start task or any task. - * The default value is 0 (no retries) - */ + /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). */ maxTaskRetryCount?: number; - /** - * If true and the StartTask fails on a Node, the Batch service retries the - * StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has - * still not completed successfully after all retries, then the Batch service - * marks the Node unusable, and will not schedule Tasks to it. This condition can - * be detected via the Compute Node state and failure info details. If false, the - * Batch service will not wait for the StartTask to complete. In this case, other - * Tasks can start executing on the Compute Node while the StartTask is still - * running; and even if the StartTask fails, new Tasks will continue to be - * scheduled on the Compute Node. The default is true. - */ + /** Whether the Batch service should wait for the StartTask to complete successfully (that is, to exit with exit code 0) before scheduling any Tasks on the Compute Node. If true and the StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks to it. This condition can be detected via the Compute Node state and failure info details. 
If false, the Batch service will not wait for the StartTask to complete. In this case, other Tasks can start executing on the Compute Node while the StartTask is still running; and even if the StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is true. */ waitForSuccess?: boolean; } /** The container settings for a Task. */ export interface TaskContainerSettings { - /** - * These additional options are supplied as arguments to the "docker create" - * command, in addition to those controlled by the Batch Service. - */ + /** Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. */ containerRunOptions?: string; - /** - * This is the full Image reference, as would be specified to "docker pull". If - * no tag is provided as part of the Image name, the tag ":latest" is used as a - * default. - */ + /** The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default. */ imageName: string; - /** This setting can be omitted if was already provided at Pool creation. */ + /** The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. */ registry?: ContainerRegistry; /** - * The default is 'taskWorkingDirectory'. + * The location of the container Task working directory. The default is 'taskWorkingDirectory'. * * Possible values: taskWorkingDirectory, containerImageDefault */ @@ -807,59 +393,20 @@ export interface TaskContainerSettings { /** A single file or multiple files to be downloaded to a Compute Node. */ export interface ResourceFile { - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. - */ + /** The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. */ autoStorageContainerName?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. This URL must be readable - * and listable from compute nodes. There are three ways to get such a URL for a - * container in Azure storage: include a Shared Access Signature (SAS) granting - * read and list permissions on the container, use a managed identity with read - * and list permissions, or set the ACL for the container to allow public access. - */ + /** The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. */ storageContainerUrl?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. If the URL points to - * Azure Blob Storage, it must be readable from compute nodes. 
There are three - * ways to get such a URL for a blob in Azure storage: include a Shared Access - * Signature (SAS) granting read permissions on the blob, use a managed identity - * with read permission, or set the ACL for the blob or its container to allow - * public access. - */ + /** The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. */ httpUrl?: string; - /** - * The property is valid only when autoStorageContainerName or storageContainerUrl - * is used. This prefix can be a partial filename or a subdirectory. If a prefix - * is not specified, all the files in the container will be downloaded. - */ + /** The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. */ blobPrefix?: string; - /** - * If the httpUrl property is specified, the filePath is required and describes - * the path which the file will be downloaded to, including the filename. - * Otherwise, if the autoStorageContainerName or storageContainerUrl property is - * specified, filePath is optional and is the directory to download the files to. - * In the case where filePath is used as a directory, any directory structure - * already associated with the input data will be retained in full and appended to - * the specified filePath directory. The specified relative path cannot break out - * of the Task's working directory (for example by using '..'). - */ + /** The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..'). */ filePath?: string; - /** - * This property applies only to files being downloaded to Linux Compute Nodes. It - * will be ignored if it is specified for a resourceFile which will be downloaded - * to a Windows Compute Node. If this property is not specified for a Linux - * Compute Node, then a default value of 0770 is applied to the file. - */ + /** The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file. 
*/ fileMode?: string; - /** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ - identityReference?: ComputeNodeIdentityReference; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by storageContainerUrl or httpUrl. */ + identityReference?: BatchNodeIdentityReference; } /** An environment variable to be set on a Task process. */ @@ -870,100 +417,60 @@ export interface EnvironmentSetting { value?: string; } -/** Specify either the userName or autoUser property, but not both. */ +/** The definition of the user identity under which the Task is run. Specify either the userName or autoUser property, but not both. */ export interface UserIdentity { - /** - * The userName and autoUser properties are mutually exclusive; you must specify - * one but not both. - */ + /** The name of the user identity under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. */ username?: string; - /** - * The userName and autoUser properties are mutually exclusive; you must specify - * one but not both. - */ + /** The auto user under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. */ autoUser?: AutoUserSpecification; } -/** - * Specifies the parameters for the auto user that runs a Task on the Batch - * service. - */ +/** Specifies the options for the auto user that runs an Azure Batch Task. */ export interface AutoUserSpecification { /** - * The default value is pool. If the pool is running Windows a value of Task - * should be specified if stricter isolation between tasks is required. For - * example, if the task mutates the registry in a way which could impact other - * tasks, or if certificates have been specified on the pool which should not be - * accessible by normal tasks but should be accessible by StartTasks. + * The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. * * Possible values: task, pool */ scope?: string; /** - * The default value is nonAdmin. + * The elevation level of the auto user. The default value is nonAdmin. * * Possible values: nonadmin, admin */ elevationLevel?: string; } -/** A reference to a Certificate to be installed on Compute Nodes in a Pool. */ +/** A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ export interface CertificateReference { /** The thumbprint of the Certificate. */ thumbprint: string; /** The algorithm with which the thumbprint is associated. This must be sha1. */ thumbprintAlgorithm: string; /** - * The default value is currentuser. This property is applicable only for Pools - * configured with Windows Compute Nodes (that is, created with - * cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows - * Image reference). 
For Linux Compute Nodes, the Certificates are stored in a - * directory inside the Task working directory and an environment variable - * AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - * For Certificates with visibility of 'remoteUser', a 'certs' directory is - * created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. + * The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. * * Possible values: currentuser, localmachine */ storeLocation?: string; - /** - * This property is applicable only for Pools configured with Windows Compute - * Nodes (that is, created with cloudServiceConfiguration, or with - * virtualMachineConfiguration using a Windows Image reference). Common store - * names include: My, Root, CA, Trust, Disallowed, TrustedPeople, - * TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be - * used. The default value is My. - */ + /** The name of the Certificate store on the Compute Node into which to install the Certificate. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. */ storeName?: string; - /** - * You can specify more than one visibility in this collection. The default is all - * Accounts. - */ + /** Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts. */ visibility?: string[]; } /** A reference to an Package to be deployed to Compute Nodes. */ export interface ApplicationPackageReference { - /** - * When creating a pool, the package's application ID must be fully qualified - * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - */ + /** The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). */ applicationId: string; - /** - * If this is omitted on a Pool, and no default version is specified for this - * application, the request fails with the error code - * InvalidApplicationPackageReferences and HTTP status code 409. If this is - * omitted on a Task, and no default version is specified for this application, - * the Task fails with a pre-processing error. - */ + /** The version of the application to deploy. 
If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error. */ version?: string; } /** Specifies how Tasks should be distributed across Compute Nodes. */ export interface TaskSchedulingPolicy { /** - * If not specified, the default is spread. + * How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. * * Possible values: spread, pack */ @@ -975,60 +482,36 @@ export interface TaskSchedulingPolicy { * Compute Node. */ export interface UserAccount { - /** - * The name of the user Account. Names can contain any Unicode characters up to a - * maximum length of 20. - */ + /** The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. */ name: string; /** The password for the user Account. */ password: string; /** - * The default value is nonAdmin. + * The elevation level of the user Account. The default value is nonAdmin. * * Possible values: nonadmin, admin */ elevationLevel?: string; - /** - * This property is ignored if specified on a Windows Pool. If not specified, the - * user is created with the default options. - */ + /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. */ linuxUserConfiguration?: LinuxUserConfiguration; - /** - * This property can only be specified if the user is on a Windows Pool. If not - * specified and on a Windows Pool, the user is created with the default options. - */ + /** The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options. */ windowsUserConfiguration?: WindowsUserConfiguration; } /** Properties used to create a user Account on a Linux Compute Node. */ export interface LinuxUserConfiguration { - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the uid. - */ + /** The user ID of the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid. */ uid?: number; - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the gid. - */ + /** The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid. */ gid?: number; - /** - * The private key must not be password protected. The private key is used to - * automatically configure asymmetric-key based authentication for SSH between - * Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication - * property is true (it is ignored if enableInterNodeCommunication is false). It - * does this by placing the key pair into the user's .ssh directory. If not - * specified, password-less SSH is not configured between Compute Nodes (no - * modification of the user's .ssh directory is done). - */ + /** The SSH private key for the user Account. 
The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done). */ sshPrivateKey?: string; } /** Properties used to create a user Account on a Windows Compute Node. */ export interface WindowsUserConfiguration { /** - * The default value for VirtualMachineConfiguration Pools is 'batch' and for - * CloudServiceConfiguration Pools is 'interactive'. + * The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. * * Possible values: batch, interactive */ @@ -1048,13 +531,13 @@ export interface MetadataItem { /** The file system to mount on each node. */ export interface MountConfiguration { - /** This property is mutually exclusive with all other properties. */ + /** The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties. */ azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration; - /** This property is mutually exclusive with all other properties. */ + /** The NFS file system to mount on each node. This property is mutually exclusive with all other properties. */ nfsMountConfiguration?: NFSMountConfiguration; - /** This property is mutually exclusive with all other properties. */ + /** The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all other properties. */ cifsMountConfiguration?: CifsMountConfiguration; - /** This property is mutually exclusive with all other properties. */ + /** The Azure File Share to mount on each node. This property is mutually exclusive with all other properties. */ azureFileShareConfiguration?: AzureFileShareConfiguration; } @@ -1064,40 +547,25 @@ export interface AzureBlobFileSystemConfiguration { accountName: string; /** The Azure Blob Storage Container name. */ containerName: string; - /** - * This property is mutually exclusive with both sasKey and identity; exactly one - * must be specified. - */ + /** The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified. */ accountKey?: string; - /** - * This property is mutually exclusive with both accountKey and identity; exactly - * one must be specified. - */ + /** The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified. */ sasKey?: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ blobfuseOptions?: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. 
*/ relativeMountPath: string; - /** - * This property is mutually exclusive with both accountKey and sasKey; exactly - * one must be specified. - */ - identityReference?: ComputeNodeIdentityReference; + /** The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified. */ + identityReference?: BatchNodeIdentityReference; } /** Information used to connect to an NFS file system. */ export interface NFSMountConfiguration { /** The URI of the file system to mount. */ source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ mountOptions?: string; } @@ -1107,12 +575,9 @@ export interface CifsMountConfiguration { username: string; /** The URI of the file system to mount. */ source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ mountOptions?: string; /** The password to use for authentication against the CIFS file system. */ password: string; @@ -1122,295 +587,150 @@ export interface CifsMountConfiguration { export interface AzureFileShareConfiguration { /** The Azure Storage account name. */ accountName: string; - /** This is of the form 'https://{account}.file.core.windows.net/'. */ + /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */ azureFileUrl: string; /** The Azure Storage account key. */ accountKey: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ mountOptions?: string; } -/** The identity of the Batch pool, if configured. */ -export interface BatchPoolIdentity { +/** Represents a name-value pair. */ +export interface NameValuePair { + /** The name in the name-value pair. */ + name?: string; + /** The value in the name-value pair. */ + value?: string; +} + +/** Options for updating an Azure Batch Pool. 
*/ +export interface BatchPoolUpdateOptions { + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. */ + startTask?: StartTask; /** - * The list of user identities associated with the Batch pool. The user identity - * dictionary key references will be ARM resource ids in the form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - * - * Possible values: UserAssigned, None + * If this element is present, it replaces any existing Certificate references configured on the Pool. + * If omitted, any existing Certificate references are left unchanged. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ - type: string; + certificateReferences?: Array; + /** A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. */ + applicationPackageReferences?: Array; + /** A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. */ + metadata?: Array; /** - * The user identity dictionary key references will be ARM resource ids in the - * form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + * The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. + * + * Possible values: default, classic, simplified */ - userAssignedIdentities?: Array; -} - -/** The user assigned Identity */ -export interface UserAssignedIdentity { - /** The ARM resource id of the user assigned identity */ - resourceId: string; + targetNodeCommunicationMode?: string; } -/** Options for enabling automatic scaling on a Pool. */ -export interface BatchPoolEnableAutoScaleParameters { - /** - * The formula is checked for validity before it is applied to the Pool. If the - * formula is not valid, the Batch service rejects the request with detailed error - * information. 
For more information about specifying this formula, see - * Automatically scale Compute Nodes in an Azure Batch Pool - * (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - */ +/** Options for enabling automatic scaling on an Azure Batch Pool. */ +export interface BatchPoolEnableAutoScaleOptions { + /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ autoScaleFormula?: string; - /** - * The default value is 15 minutes. The minimum and maximum value are 5 minutes - * and 168 hours respectively. If you specify a value less than 5 minutes or - * greater than 168 hours, the Batch service rejects the request with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). If you specify a new interval, then the existing - * autoscale evaluation schedule will be stopped and a new autoscale evaluation - * schedule will be started, with its starting time being the time when this - * request was issued. - */ + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. */ autoScaleEvaluationInterval?: string; } -/** Options for evaluating an automatic scaling formula on a Pool. */ -export interface BatchPoolEvaluateAutoScaleParameters { - /** - * The formula is validated and its results calculated, but it is not applied to - * the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a - * Pool'. For more information about specifying this formula, see Automatically - * scale Compute Nodes in an Azure Batch Pool - * (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - */ +/** Options for evaluating an automatic scaling formula on an Azure Batch Pool. */ +export interface BatchPoolEvaluateAutoScaleOptions { + /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ autoScaleFormula: string; } -/** Options for changing the size of a Pool. */ -export interface BatchPoolResizeParameters { +/** Options for changing the size of an Azure Batch Pool. */ +export interface BatchPoolResizeOptions { /** The desired number of dedicated Compute Nodes in the Pool. 
*/ targetDedicatedNodes?: number; /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ targetLowPriorityNodes?: number; - /** - * The default value is 15 minutes. The minimum value is 5 minutes. If you specify - * a value less than 5 minutes, the Batch service returns an error; if you are - * calling the REST API directly, the HTTP status code is 400 (Bad Request). - */ + /** The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ resizeTimeout?: string; /** - * The default value is requeue. + * Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. * * Possible values: requeue, terminate, taskcompletion, retaineddata */ nodeDeallocationOption?: string; } -/** Options for removing Compute Nodes from a Pool. */ -export interface NodeRemoveParameters { - /** A maximum of 100 nodes may be removed per request. */ - nodeList: string[]; +/** Options for replacing properties on an Azure Batch Pool. */ +export interface BatchPoolReplaceOptions { + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. */ + startTask?: StartTask; /** - * The default value is 15 minutes. The minimum value is 5 minutes. If you specify - * a value less than 5 minutes, the Batch service returns an error; if you are - * calling the REST API directly, the HTTP status code is 400 (Bad Request). + * This list replaces any existing Certificate references configured on the Pool. + * If you specify an empty collection, any existing Certificate references are removed from the Pool. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ - resizeTimeout?: string; + certificateReferences: Array; + /** The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. 
*/ + applicationPackageReferences: Array; + /** A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. */ + metadata: Array; /** - * The default value is requeue. + * The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. * - * Possible values: requeue, terminate, taskcompletion, retaineddata + * Possible values: default, classic, simplified */ - nodeDeallocationOption?: string; + targetNodeCommunicationMode?: string; } -/** Resource usage statistics for a Job. */ -export interface JobStatistics { - /** The start time of the time range covered by the statistics. */ - startTime: Date | string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: Date | string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in the Job. - */ - userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in the Job. - */ - kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on - * a Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If a Task was retried, this - * includes the wall clock time of all the Task retries. - */ - wallClockTime: string; - /** The total number of disk read operations made by all Tasks in the Job. */ - readIOps: number; - /** The total number of disk write operations made by all Tasks in the Job. */ - writeIOps: number; - /** The total amount of data in GiB read from disk by all Tasks in the Job. */ - readIOGiB: number; - /** The total amount of data in GiB written to disk by all Tasks in the Job. */ - writeIOGiB: number; - /** A Task completes successfully if it returns exit code 0. */ - numSucceededTasks: number; - /** - * A Task fails if it exhausts its maximum retry count without returning exit code - * 0. - */ - numFailedTasks: number; - /** - * The total number of retries on all the Tasks in the Job during the given time - * range. - */ - numTaskRetries: number; +/** Options for removing nodes from an Azure Batch Pool. */ +export interface NodeRemoveOptions { + /** A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. */ + nodeList: string[]; + /** The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; /** - * The wait time for a Task is defined as the elapsed time between the creation of - * the Task and the start of Task execution. (If the Task is retried due to - * failures, the wait time is the time to the most recent Task execution.) This - * value is only reported in the Account lifetime statistics; it is not included - * in the Job statistics. + * Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. 
The default value is requeue. + * + * Possible values: requeue, terminate, taskcompletion, retaineddata */ - waitTime: string; + nodeDeallocationOption?: string; } /** An Azure Batch Job. */ export interface BatchJob { - /** - * The ID is case-preserving and case-insensitive (that is, you may not have two - * IDs within an Account that differ only by case). - */ - id?: string; - /** The display name for the Job. */ - displayName?: string; - /** - * Whether Tasks in the Job can define dependencies on each other. The default is - * false. - */ - usesTaskDependencies?: boolean; - /** - * Priority values can range from -1000 to 1000, with -1000 being the lowest - * priority and 1000 being the highest priority. The default value is 0. - */ + /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */ priority?: number; - /** - * If the value is set to True, other high priority jobs submitted to the system - * will take precedence and will be able requeue tasks from this job. You can - * update a job's allowTaskPreemption after it has been created using the update - * job API. - */ + /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ allowTaskPreemption?: boolean; - /** - * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not - * specified, the default value is -1, which means there's no limit to the number - * of tasks that can be run at once. You can update a job's maxParallelTasks after - * it has been created using the update job API. - */ + /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ maxParallelTasks?: number; - /** The execution constraints for a Job. */ + /** The execution constraints for the Job. */ constraints?: JobConstraints; + /** The Pool settings associated with the Job. */ + poolInfo: PoolInformation; /** - * The Job Manager Task is automatically started when the Job is created. The - * Batch service tries to schedule the Job Manager Task before any other Tasks in - * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where - * Job Manager Tasks are running for as long as possible (that is, Compute Nodes - * running 'normal' Tasks are removed before Compute Nodes running Job Manager - * Tasks). When a Job Manager Task fails and needs to be restarted, the system - * tries to schedule it at the highest priority. If there are no idle Compute - * Nodes available, the system may terminate one of the running Tasks in the Pool - * and return it to the queue in order to make room for the Job Manager Task to - * restart. Note that a Job Manager Task in one Job does not have priority over - * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For - * example, if a Job Manager in a priority 0 Job needs to be restarted, it will - * not displace Tasks of a priority 1 Job. 
Batch will retry Tasks when a recovery - * operation is triggered on a Node. Examples of recovery operations include (but - * are not limited to) when an unhealthy Node is rebooted or a Compute Node - * disappeared due to host failure. Retries due to recovery operations are - * independent of and are not counted against the maxTaskRetryCount. Even if the - * maxTaskRetryCount is 0, an internal retry due to a recovery operation may - * occur. Because of this, all Tasks should be idempotent. This means Tasks need - * to tolerate being interrupted and restarted without causing any corruption or - * duplicate data. The best practice for long running Tasks is to use some form of - * checkpointing. - */ - jobManagerTask?: JobManagerTask; - /** - * The Job Preparation Task is a special Task run on each Compute Node before any - * other Task of the Job. - */ - jobPreparationTask?: JobPreparationTask; - /** - * The Job Release Task is a special Task run at the end of the Job on each - * Compute Node that has run any other Task of the Job. - */ - jobReleaseTask?: JobReleaseTask; - /** - * Individual Tasks can override an environment setting specified here by - * specifying the same setting name with a different value. - */ - commonEnvironmentSettings?: Array; - /** Specifies how a Job should be assigned to a Pool. */ - poolInfo?: PoolInformation; - /** - * The default is noaction. + * The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. * * Possible values: noaction, terminatejob */ onAllTasksComplete?: string; - /** - * A Task is considered to have failed if has a failureInfo. A failureInfo is set - * if the Task completes with a non-zero exit code after exhausting its retry - * count, or if there was an error starting the Task, for example due to a - * resource file download error. The default is noaction. - * - * Possible values: noaction, performexitoptionsjobaction - */ - onTaskFailure?: string; - /** The network configuration for the Job. */ - networkConfiguration?: JobNetworkConfiguration; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ + /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; } /** The execution constraints for a Job. */ export interface JobConstraints { - /** - * If the Job does not complete within the time limit, the Batch service - * terminates it and any Tasks that are still running. In this case, the - * termination reason will be MaxWallClockTimeExpiry. If this property is not - * specified, there is no time limit on how long the Job may run. - */ + /** The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run. */ maxWallClockTime?: string; - /** - * Note that this value specifically controls the number of retries. The Batch - * service will try each Task once, and may then retry up to this limit. For - * example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one - * initial try and 3 retries). If the maximum retry count is 0, the Batch service - * does not retry Tasks. 
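Because the retry semantics in the comment above are easy to misread, a small worked example: maxTaskRetryCount counts retries, not attempts, so the total attempt budget is one initial try plus the retry count, and -1 means unbounded. A sketch, assuming the generated models module:

import type { JobConstraints } from "./models"; // assumed export location

const constraints: JobConstraints = {
  maxWallClockTime: "PT8H", // terminate the Job and its Tasks after 8 hours
  maxTaskRetryCount: 3 // 1 initial try + 3 retries = up to 4 attempts per Task
};

// Total attempts implied by a retry count; -1 means retry without limit.
function maxAttempts(maxTaskRetryCount: number): number {
  return maxTaskRetryCount === -1 ? Number.POSITIVE_INFINITY : 1 + maxTaskRetryCount;
}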
If the maximum retry count is -1, the Batch service - * retries the Task without limit, however this is not recommended for a start - * task or any task. The default value is 0 (no retries) - */ + /** The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries). */ maxTaskRetryCount?: number; } /** + * Specifies details of a Job Manager Task. * The Job Manager Task is automatically started when the Job is created. The * Batch service tries to schedule the Job Manager Task before any other Tasks in * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where @@ -1435,191 +755,90 @@ export interface JobConstraints { * checkpointing. */ export interface JobManagerTask { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. - */ + /** A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. */ id: string; - /** - * It need not be unique and can contain any Unicode characters up to a maximum - * length of 1024. - */ + /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; - /** - * If the Pool that will run this Task has containerConfiguration set, this must - * be set as well. If the Pool that will run this Task doesn't have - * containerConfiguration set, this must not be set. 
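Since the command line does not run under a shell, a Job Manager Task that needs shell features must invoke one explicitly, as the comment below notes. A hedged sketch of a minimal Job Manager Task; the ID and script name are hypothetical:

import type { JobManagerTask } from "./models"; // assumed export location

const jobManager: JobManagerTask = {
  id: "jobmanager", // hypothetical ID; <= 64 chars of alphanumerics, hyphens, underscores
  // Invoke the shell explicitly to get variable expansion and similar features.
  commandLine: '/bin/sh -c "echo start && ./run-pipeline.sh"' // hypothetical script
};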
When this is specified, all - * directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure - * Batch directories on the node) are mapped into the container, all Task - * environment variables are mapped into the container, and the Task command line - * is executed in the container. Files produced in the container outside of - * AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that - * Batch file APIs will not be able to access those files. - */ + /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettings; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ resourceFiles?: Array; - /** - * For multi-instance Tasks, the files will only be uploaded from the Compute Node - * on which the primary Task is executed. - */ + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ outputFiles?: Array; /** A list of environment variable settings for the Job Manager Task. */ environmentSettings?: Array; - /** Execution constraints to apply to a Task. */ + /** Constraints that apply to the Job Manager Task. */ constraints?: TaskConstraints; - /** - * The default is 1. A Task can only be scheduled to run on a compute node if the - * node has enough free scheduling slots available. For multi-instance Tasks, this - * property is not supported and must not be specified. - */ + /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified. */ requiredSlots?: number; - /** - * If true, when the Job Manager Task completes, the Batch service marks the Job - * as complete. 
If any Tasks are still running at this time (other than Job
- * Release), those Tasks are terminated. If false, the completion of the Job
- * Manager Task does not affect the Job status. In this case, you should either
- * use the onAllTasksComplete attribute to terminate the Job, or have a client or
- * user terminate the Job explicitly. An example of this is if the Job Manager
- * creates a set of Tasks but then takes no further role in their execution. The
- * default value is true. If you are using the onAllTasksComplete and
- * onTaskFailure attributes to control Job lifetime, and using the Job Manager
- * Task only to create the Tasks for the Job (not to monitor progress), then it is
- * important to set killJobOnCompletion to false.
- */
+ /** Whether completion of the Job Manager Task signifies completion of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false. */
killJobOnCompletion?: boolean;
- /** If omitted, the Task runs as a non-administrative user unique to the Task. */
+ /** The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */
userIdentity?: UserIdentity;
- /**
- * If true, no other Tasks will run on the same Node for as long as the Job
- * Manager is running. If false, other Tasks can run simultaneously with the Job
- * Manager on a Compute Node. The Job Manager Task counts normally against the
- * Compute Node's concurrent Task limit, so this is only relevant if the Compute
- * Node allows multiple concurrent Tasks. The default value is true.
- */
+ /** Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true. */
runExclusive?: boolean;
/**
- * Application Packages are downloaded and deployed to a shared directory, not the
- * Task working directory. Therefore, if a referenced Application Package is
- * already on the Compute Node, and is up to date, then it is not re-downloaded;
+ * A list of Application Packages that the Batch service will deploy to the
+ * Compute Node before running the command line. Application Packages are
+ * downloaded and deployed to a shared directory, not the Task working
+ * directory. Therefore, if a referenced Application Package is already
+ * on the Compute Node, and is up to date, then it is not re-downloaded;
* the existing copy on the Compute Node is used.
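To make the killJobOnCompletion guidance above concrete, here is a sketch (not generated output) of a job body that leaves Job lifetime to onAllTasksComplete. Whether these properties sit on BatchJob itself or on a separate creation-options type depends on the final model split in this change, so the object is left untyped:

// Shape sketch only; property placement may differ in the final generated models.
const jobBody = {
  poolInfo: { poolId: "my-pool" }, // hypothetical existing Pool
  // Let Task completion, not the Job Manager, decide when the Job ends.
  onAllTasksComplete: "terminatejob",
  jobManagerTask: {
    id: "jobmanager",
    commandLine: '/bin/sh -c "./submit-tasks.sh"', // hypothetical script
    killJobOnCompletion: false // required when onAllTasksComplete owns the lifetime
  }
};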
If a referenced Application
* Package cannot be installed, for example because the package has been deleted
* or because download failed, the Task fails.
*/
applicationPackageReferences?: Array<ApplicationPackageReference>;
- /**
- * If this property is set, the Batch service provides the Task with an
- * authentication token which can be used to authenticate Batch service operations
- * without requiring an Account access key. The token is provided via the
- * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the
- * Task can carry out using the token depend on the settings. For example, a Task
- * can request Job permissions in order to add other Tasks to the Job, or check
- * the status of the Job or of other Tasks under the Job.
- */
+ /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */
authenticationTokenSettings?: AuthenticationTokenSettings;
- /** The default value is true. */
+ /** Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true. */
allowLowPriorityNode?: boolean;
}

-/**
- * On every file uploads, Batch service writes two log files to the compute node,
- * 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn
- * more about a specific failure.
- */
+/** On every file upload, the Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure. */
export interface OutputFile {
- /**
- * Both relative and absolute paths are supported. Relative paths are relative to
- * the Task working directory. The following wildcards are supported: * matches 0
- * or more characters (for example pattern abc* would match abc or abcdef), **
- * matches any directory, ? matches any single character, [abc] matches one
- * character in the brackets, and [a-c] matches one character in the range.
- * Brackets can include a negation to match any character not specified (for
- * example [!abc] matches any character but a, b, or c). If a file name starts
- * with "." it is ignored by default but may be matched by specifying it
- * explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple
- * example: **\*.txt matches any file that does not start in '.' and ends with
- * .txt in the Task working directory or any subdirectory. If the filename
- * contains a wildcard character it can be escaped using brackets (for example
- * abc[*] would match a file named abc*). Note that both \ and / are treated as
- * directory separators on Windows, but only / is on Linux. Environment variables
- * (%var% on Windows or $var on Linux) are expanded prior to the pattern being
- * applied.
- */
+ /** A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory.
The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. */ filePattern: string; - /** The destination to which a file should be uploaded. */ + /** The destination for the output file(s). */ destination: OutputFileDestination; - /** - * Details about an output file upload operation, including under what conditions - * to perform the upload. - */ + /** Additional options for the upload operation, including under what conditions to perform the upload. */ uploadOptions: OutputFileUploadOptions; } /** The destination to which a file should be uploaded. */ export interface OutputFileDestination { - /** Specifies a file upload destination within an Azure blob storage container. */ + /** A location in Azure blob storage to which files are uploaded. */ container?: OutputFileBlobContainerDestination; } /** Specifies a file upload destination within an Azure blob storage container. */ export interface OutputFileBlobContainerDestination { - /** - * If filePattern refers to a specific file (i.e. contains no wildcards), then - * path is the name of the blob to which to upload that file. If filePattern - * contains one or more wildcards (and therefore may match multiple files), then - * path is the name of the blob virtual directory (which is prepended to each blob - * name) to which to upload the file(s). If omitted, file(s) are uploaded to the - * root of the container with a blob name matching their file name. - */ + /** The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. */ path?: string; - /** - * If not using a managed identity, the URL must include a Shared Access Signature - * (SAS) granting write permissions to the container. - */ + /** The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. */ containerUrl: string; - /** The identity must have write access to the Azure Blob Storage container */ - identityReference?: ComputeNodeIdentityReference; - /** - * These headers will be specified when uploading files to Azure Storage. Official - * document on allowed headers when uploading blobs: - * https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types - */ + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ + identityReference?: BatchNodeIdentityReference; + /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. 
*/
uploadHeaders?: Array<HttpHeader>;
}

/** An HTTP header name-value pair */
export interface HttpHeader {
- /** The case-insensitive name of the header to be used while uploading output files */
+ /** The case-insensitive name of the header to be used while uploading output files. */
name: string;
- /** The value of the header to be used while uploading output files */
+ /** The value of the header to be used while uploading output files. */
value?: string;
}

/**
- * Details about an output file upload operation, including under what conditions
+ * Options for an output file upload operation, including under what conditions
* to perform the upload.
*/
export interface OutputFileUploadOptions {
/**
- * The default is taskcompletion.
+ * The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion.
*
* Possible values: tasksuccess, taskfailure, taskcompletion
*/
@@ -1628,23 +847,11 @@ export interface OutputFileUploadOptions {
/** Execution constraints to apply to a Task. */
export interface TaskConstraints {
- /** If this is not specified, there is no time limit on how long the Task may run. */
+ /** The maximum elapsed time that the Task may run, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run. */
maxWallClockTime?: string;
- /**
- * The default is 7 days, i.e. the Task directory will be retained for 7 days
- * unless the Compute Node is removed or the Job is deleted.
- */
+ /** The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */
retentionTime?: string;
- /**
- * Note that this value specifically controls the number of retries for the Task
- * executable due to a nonzero exit code. The Batch service will try the Task
- * once, and may then retry up to this limit. For example, if the maximum retry
- * count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries).
- * If the maximum retry count is 0, the Batch service does not retry the Task
- * after the first attempt. If the maximum retry count is -1, the Batch service
- * retries the Task without limit, however this is not recommended for a start
- * task or any task. The default value is 0 (no retries)
- */
+ /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit; however, this is not recommended for a start task or any task. The default value is 0 (no retries). */
maxTaskRetryCount?: number;
}

@@ -1653,16 +860,12 @@ export interface TaskConstraints {
* service operations.
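Putting the OutputFile pieces together, here is a sketch that uploads a Task's log files to a blob container on completion. The container URL is a placeholder that would normally carry a SAS granting write access, and the uploadOptions member name follows the Batch REST models (an assumption, since it is not shown in this hunk):

import type { OutputFile } from "./models"; // assumed export location

const taskLogs: OutputFile = {
  // ** matches any directory, so this picks up *.log anywhere under the working dir.
  filePattern: "**/*.log",
  destination: {
    container: {
      containerUrl: "https://myaccount.blob.core.windows.net/logs?<sas>", // placeholder
      path: "job-1/task-1" // blob virtual directory; omit to upload to the container root
    }
  },
  // Upload when the Task completes, regardless of exit code.
  uploadOptions: { uploadCondition: "taskcompletion" }
};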
*/ export interface AuthenticationTokenSettings { - /** - * The authentication token grants access to a limited set of Batch service - * operations. Currently the only supported value for the access property is - * 'job', which grants access to all operations related to the Job which contains - * the Task. - */ + /** The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. */ access?: string[]; } /** + * A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. * You can use Job Preparation to prepare a Node to run Tasks for the Job. * Activities commonly performed in Job Preparation include: Downloading common * resource files used by all the Tasks in the Job. The Job Preparation Task can @@ -1690,80 +893,28 @@ export interface AuthenticationTokenSettings { * running Tasks is to use some form of checkpointing. */ export interface JobPreparationTask { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. If you do not - * specify this property, the Batch service assigns a default value of - * 'jobpreparation'. No other Task in the Job can have the same ID as the Job - * Preparation Task. If you try to submit a Task with the same id, the Batch - * service rejects the request with error code TaskIdSameAsJobPreparationTask; if - * you are calling the REST API directly, the HTTP status code is 409 (Conflict). - */ + /** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). 
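As a sketch of how a Task opts into the authentication token described above (the only access scope currently supported being 'job'):

import type { AuthenticationTokenSettings } from "./models"; // assumed export location

const tokenSettings: AuthenticationTokenSettings = {
  access: ["job"] // grants access to operations on the Job that contains the Task
};

// Inside the Task, the token then arrives via an environment variable:
// process.env.AZ_BATCH_AUTHENTICATION_TOKEN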
*/ commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ + /** The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettings; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ resourceFiles?: Array; /** A list of environment variable settings for the Job Preparation Task. */ environmentSettings?: Array; - /** Execution constraints to apply to a Task. */ + /** Constraints that apply to the Job Preparation Task. */ constraints?: TaskConstraints; - /** - * If true and the Job Preparation Task fails on a Node, the Batch service retries - * the Job Preparation Task up to its maximum retry count (as specified in the - * constraints element). If the Task has still not completed successfully after - * all retries, then the Batch service will not schedule Tasks of the Job to the - * Node. The Node remains active and eligible to run Tasks of other Jobs. If - * false, the Batch service will not wait for the Job Preparation Task to - * complete. In this case, other Tasks of the Job can start executing on the - * Compute Node while the Job Preparation Task is still running; and even if the - * Job Preparation Task fails, new Tasks will continue to be scheduled on the - * Compute Node. The default value is true. - */ + /** Whether the Batch service should wait for the Job Preparation Task to complete successfully before scheduling any other Tasks of the Job on the Compute Node. A Job Preparation Task has completed successfully if it exits with exit code 0. If true and the Job Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as specified in the constraints element). 
If the Task has still not completed successfully after all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start executing on the Compute Node while the Job Preparation Task is still running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The default value is true. */ waitForSuccess?: boolean; - /** - * If omitted, the Task runs as a non-administrative user unique to the Task on - * Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux - * Compute Nodes. - */ + /** The user identity under which the Job Preparation Task runs. If omitted, the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux Compute Nodes. */ userIdentity?: UserIdentity; - /** - * The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - * the Job Preparation Task did not complete (e.g. because the reboot occurred - * while the Task was running). Therefore, you should always write a Job - * Preparation Task to be idempotent and to behave correctly if run multiple - * times. The default value is true. - */ + /** Whether the Batch service should rerun the Job Preparation Task after a Compute Node reboots. The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the reboot occurred while the Task was running). Therefore, you should always write a Job Preparation Task to be idempotent and to behave correctly if run multiple times. The default value is true. */ rerunOnNodeRebootAfterSuccess?: boolean; } /** + * A Job Release Task to run on Job completion on any Compute Node where the Job has run. * The Job Release Task runs when the Job ends, because of one of the following: * The user calls the Terminate Job API, or the Delete Job API while the Job is * still active, the Job's maximum wall clock time constraint is reached, and the @@ -1781,79 +932,29 @@ export interface JobPreparationTask { * specified on the Pool. */ export interface JobReleaseTask { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. If you do not - * specify this property, the Batch service assigns a default value of - * 'jobrelease'. No other Task in the Job can have the same ID as the Job Release - * Task. If you try to submit a Task with the same id, the Batch service rejects - * the request with error code TaskIdSameAsJobReleaseTask; if you are calling the - * REST API directly, the HTTP status code is 409 (Conflict). - */ + /** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). 
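A hedged sketch of a Job Preparation Task that stages shared data before any other Task runs on a Node; because preparation may rerun after reboots, the script (a hypothetical name here) should be idempotent, as the comments above stress:

import type { JobPreparationTask } from "./models"; // assumed export location

const prepTask: JobPreparationTask = {
  // Omitting id lets the service default it to 'jobpreparation'.
  commandLine: '/bin/sh -c "./download-shared-data.sh"', // hypothetical script
  waitForSuccess: true, // block other Job Tasks on this Node until prep succeeds
  rerunOnNodeRebootAfterSuccess: true // hence the script must be idempotent
};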
*/ id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ + /** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettings; - /** Files listed under this element are located in the Task's working directory. */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. */ resourceFiles?: Array; /** A list of environment variable settings for the Job Release Task. */ environmentSettings?: Array; - /** - * The maximum elapsed time that the Job Release Task may run on a given Compute - * Node, measured from the time the Task starts. If the Task does not complete - * within the time limit, the Batch service terminates it. The default value is 15 - * minutes. You may not specify a timeout longer than 15 minutes. If you do, the - * Batch service rejects it with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). 
- */ + /** The maximum elapsed time that the Job Release Task may run on a given Compute Node, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. The default value is 15 minutes. You may not specify a timeout longer than 15 minutes. If you do, the Batch service rejects it with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ maxWallClockTime?: string; - /** - * The default is 7 days, i.e. the Task directory will be retained for 7 days - * unless the Compute Node is removed or the Job is deleted. - */ + /** The minimum time to retain the Task directory for the Job Release Task on the Compute Node. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */ retentionTime?: string; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ + /** The user identity under which the Job Release Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ userIdentity?: UserIdentity; } /** Specifies how a Job should be assigned to a Pool. */ export interface PoolInformation { - /** - * You must ensure that the Pool referenced by this property exists. If the Pool - * does not exist at the time the Batch service tries to schedule a Job, no Tasks - * for the Job will run until you create a Pool with that id. Note that the Batch - * service will not reject the Job request; it will simply not run Tasks until the - * Pool exists. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ + /** The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both. */ poolId?: string; - /** - * If auto Pool creation fails, the Batch service moves the Job to a completed - * state, and the Pool creation error is set in the Job's scheduling error - * property. The Batch service manages the lifetime (both creation and, unless - * keepAlive is specified, deletion) of the auto Pool. Any user actions that - * affect the lifetime of the auto Pool while the Job is active will result in - * unexpected behavior. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ + /** Characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. If auto Pool creation fails, the Batch service moves the Job to a completed state, and the Pool creation error is set in the Job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool specification, but not both. 
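PoolInformation is an either/or: exactly one of poolId and autoPoolSpecification may be set. A sketch of the two mutually exclusive forms, with placeholder names and VM size:

import type { PoolInformation } from "./models"; // assumed export location

// Form 1: run on an existing Pool (Tasks simply wait if it does not exist yet).
const existingPool: PoolInformation = { poolId: "my-pool" };

// Form 2: let the service create a temporary auto Pool for the Job.
const autoPool: PoolInformation = {
  autoPoolSpecification: {
    autoPoolIdPrefix: "nightly", // optional; up to 20 characters
    poolLifetimeOption: "job", // the Pool lives and dies with the Job
    pool: { vmSize: "Standard_D2s_v3" } // a real spec would also set virtualMachineConfiguration
  }
};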
*/ autoPoolSpecification?: AutoPoolSpecification; } @@ -1862,172 +963,70 @@ export interface PoolInformation { * create this auto Pool when the Job is submitted. */ export interface AutoPoolSpecification { - /** - * The Batch service assigns each auto Pool a unique identifier on creation. To - * distinguish between Pools created for different purposes, you can specify this - * element to add a prefix to the ID that is assigned. The prefix can be up to 20 - * characters long. - */ + /** A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. */ autoPoolIdPrefix?: string; /** - * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule - * are assigned to Pools. + * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. * * Possible values: jobschedule, job */ poolLifetimeOption: string; - /** - * If false, the Batch service deletes the Pool once its lifetime (as determined - * by the poolLifetimeOption setting) expires; that is, when the Job or Job - * Schedule completes. If true, the Batch service does not delete the Pool - * automatically. It is up to the user to delete auto Pools created with this - * option. - */ + /** Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. */ keepAlive?: boolean; - /** Specification for creating a new Pool. */ + /** The Pool specification for the auto Pool. */ pool?: PoolSpecification; } /** Specification for creating a new Pool. */ export interface PoolSpecification { - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - */ + /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ vmSize: string; - /** - * This property must be specified if the Pool needs to be created with Azure PaaS - * VMs. This property and virtualMachineConfiguration are mutually exclusive and - * one of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). This property cannot be specified if the - * Batch Account was created with its poolAllocationMode property set to - * 'UserSubscription'. - */ + /** The cloud service configuration for the Pool. 
This property must be specified if the Pool needs to be created with Azure PaaS VMs. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ cloudServiceConfiguration?: CloudServiceConfiguration; - /** - * This property must be specified if the Pool needs to be created with Azure IaaS - * VMs. This property and cloudServiceConfiguration are mutually exclusive and one - * of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). - */ + /** The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ virtualMachineConfiguration?: VirtualMachineConfiguration; - /** - * The default value is 1. The maximum value is the smaller of 4 times the number - * of cores of the vmSize of the pool or 256. - */ + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ taskSlotsPerNode?: number; - /** If not specified, the default is spread. */ + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ taskSchedulingPolicy?: TaskSchedulingPolicy; - /** - * This timeout applies only to manual scaling; it has no effect when - * enableAutoScale is set to true. The default value is 15 minutes. The minimum - * value is 5 minutes. If you specify a value less than 5 minutes, the Batch - * service rejects the request with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). - */ + /** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ resizeTimeout?: string; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. - */ + /** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ targetDedicatedNodes?: number; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. 
- */ + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ targetLowPriorityNodes?: number; - /** - * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must - * be specified. If true, the autoScaleFormula element is required. The Pool - * automatically resizes according to the formula. The default value is false. - */ + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false. */ enableAutoScale?: boolean; - /** - * This property must not be specified if enableAutoScale is set to false. It is - * required if enableAutoScale is set to true. The formula is checked for validity - * before the Pool is created. If the formula is not valid, the Batch service - * rejects the request with detailed error information. - */ + /** The formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. */ autoScaleFormula?: string; - /** - * The default value is 15 minutes. The minimum and maximum value are 5 minutes - * and 168 hours respectively. If you specify a value less than 5 minutes or - * greater than 168 hours, the Batch service rejects the request with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). - */ + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ autoScaleEvaluationInterval?: string; - /** - * Enabling inter-node communication limits the maximum size of the Pool due to - * deployment restrictions on the Compute Nodes of the Pool. This may result in - * the Pool not reaching its desired size. The default value is false. - */ + /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */ enableInterNodeCommunication?: boolean; - /** The network configuration for a Pool. */ + /** The network configuration for the Pool. */ networkConfiguration?: NetworkConfiguration; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. 
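To make the fixed-size versus autoscale split above concrete, a sketch of the two PoolSpecification styles; the VM size is a placeholder and the formula is a deliberately trivial example:

import type { PoolSpecification } from "./models"; // assumed export location

// Fixed size: enableAutoScale defaults to false, so target counts are required.
const fixedPool: PoolSpecification = {
  vmSize: "Standard_D2s_v3", // placeholder VM size
  targetDedicatedNodes: 3,
  targetLowPriorityNodes: 0,
  resizeTimeout: "PT15M" // applies to manual scaling only
};

// Autoscale: the formula replaces the target counts.
const autoScaledPool: PoolSpecification = {
  vmSize: "Standard_D2s_v3",
  enableAutoScale: true,
  autoScaleFormula: "$TargetDedicatedNodes = 2;", // trivial example formula
  autoScaleEvaluationInterval: "PT15M" // allowed range: 5 minutes to 168 hours
};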
Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. */ startTask?: StartTask; /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. + * Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ certificateReferences?: Array; - /** - * When creating a pool, the package's application ID must be fully qualified - * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - * Changes to Package references affect all new Nodes joining the Pool, but do not - * affect Compute Nodes that are already in the Pool until they are rebooted or - * reimaged. There is a maximum of 10 Package references on any given Pool. - */ + /** The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ applicationPackageReferences?: Array; - /** - * The list of application licenses must be a subset of available Batch service - * application licenses. If a license is requested which is not supported, Pool - * creation will fail. The permitted licenses available on the Pool are 'maya', - * 'vray', '3dsmax', 'arnold'. 
An additional charge applies for each application - * license added to the Pool. - */ + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. The permitted licenses available on the Pool are 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each application license added to the Pool. */ applicationLicenses?: string[]; /** The list of user Accounts to be created on each Compute Node in the Pool. */ userAccounts?: Array; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ + /** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; - /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ + /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ mountConfiguration?: Array; /** - * If omitted, the default value is Default. + * The desired node communication mode for the pool. If omitted, the default value is Default. * * Possible values: default, classic, simplified */ @@ -2036,87 +1035,96 @@ export interface PoolSpecification { /** The network configuration for the Job. */ export interface JobNetworkConfiguration { - /** - * The virtual network must be in the same region and subscription as the Azure - * Batch Account. The specified subnet should have enough free IP addresses to - * accommodate the number of Compute Nodes which will run Tasks from the Job. This - * can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' - * service principal must have the 'Classic Virtual Machine Contributor' - * Role-Based Access Control (RBAC) role for the specified VNet so that Azure - * Batch service can schedule Tasks on the Nodes. This can be verified by checking - * if the specified VNet has any associated Network Security Groups (NSG). If - * communication to the Nodes in the specified subnet is denied by an NSG, then - * the Batch service will set the state of the Compute Nodes to unusable. This is - * of the form - * /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - * If the specified VNet has any associated Network Security Groups (NSG), then a - * few reserved system ports must be enabled for inbound communication from the - * Azure Batch service. For Pools created with a Virtual Machine configuration, - * enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for - * Windows. Port 443 is also required to be open for outbound connections for - * communications to Azure Storage. For more details see: - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - */ + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. 
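The fully qualified application ID format described above is easiest to see spelled out. Every segment below is a placeholder, and the { applicationId, version } shape is assumed from the conventional Batch models, since ApplicationPackageReference is only referenced, not defined, in this hunk:

import type { ApplicationPackageReference } from "./models"; // assumed export location

const pkg: ApplicationPackageReference = {
  // All segments are placeholders; the ID must be fully qualified when creating a pool.
  applicationId:
    "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg" +
    "/providers/Microsoft.Batch/batchAccounts/myaccount/applications/myapp",
  version: "1.0" // optional; omit to use the default version
};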
This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId: string; } /** Contains information about the execution of a Job in the Azure Batch service. */ export interface JobExecutionInformation { - /** This is the time at which the Job was created. */ + /** The start time of the Job. This is the time at which the Job was created. */ startTime: Date | string; - /** This property is set only if the Job is in the completed state. */ + /** The completion time of the Job. This property is set only if the Job is in the completed state. */ endTime?: Date | string; - /** - * This element contains the actual Pool where the Job is assigned. When you get - * Job details from the service, they also contain a poolInfo element, which - * contains the Pool configuration data from when the Job was added or updated. - * That poolInfo element may also contain a poolId element. If it does, the two - * IDs are the same. If it does not, it means the Job ran on an auto Pool, and - * this property contains the ID of that auto Pool. - */ + /** The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool. */ poolId?: string; - /** This property is not set if there was no error starting the Job. */ + /** Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job. */ schedulingError?: JobSchedulingError; - /** - * This property is set only if the Job is in the completed state. If the Batch - * service terminates the Job, it sets the reason as follows: JMComplete - the Job - * Manager Task completed, and killJobOnCompletion was set to true. - * MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. - * TerminateJobSchedule - the Job ran as part of a schedule, and the schedule - * terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to - * terminatejob, and all Tasks in the Job are complete. 
TaskFailed - the Job's - * onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in - * the Job failed with an exit condition that specified a jobAction of - * terminatejob. Any other string is a user-defined reason specified in a call to - * the 'Terminate a Job' operation. - */ + /** A string describing the reason the Job ended. This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation. */ terminateReason?: string; } /** An error encountered by the Batch service when scheduling a Job. */ export interface JobSchedulingError { /** - * The category of the error. + * The category of the Job scheduling error. * * Possible values: usererror, servererror */ category: string; - /** - * An identifier for the Job scheduling error. Codes are invariant and are - * intended to be consumed programmatically. - */ + /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; - /** - * A message describing the Job scheduling error, intended to be suitable for - * display in a user interface. - */ + /** A message describing the Job scheduling error, intended to be suitable for display in a user interface. */ message?: string; /** A list of additional error details related to the scheduling error. */ details?: Array; } -/** Options when disabling a Job. */ -export interface BatchJobDisableParameters { +/** Resource usage statistics for a Job. */ +export interface JobStatistics { + /** The URL of the statistics. */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: Date | string; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date | string; + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ + userCPUTime: string; + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ + kernelCPUTime: string; + /** The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */ + wallClockTime: string; + /** The total number of disk read operations made by all Tasks in the Job. */ + readIOps: number; + /** The total number of disk write operations made by all Tasks in the Job. */ + writeIOps: number; + /** The total amount of data in GiB read from disk by all Tasks in the Job. 
+ readIOGiB: number;
+ /** The total amount of data in GiB written to disk by all Tasks in the Job. */
+ writeIOGiB: number;
+ /** The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. */
+ numSucceededTasks: number;
+ /** The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. */
+ numFailedTasks: number;
+ /** The total number of retries on all the Tasks in the Job during the given time range. */
+ numTaskRetries: number;
+ /** The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */
+ waitTime: string;
+}
+
+/** Options for updating an Azure Batch Job. */
+export interface BatchJobUpdateOptions {
+ /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. */
+ priority?: number;
+ /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ allowTaskPreemption?: boolean;
+ /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ maxParallelTasks?: number;
+ /** The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. */
+ constraints?: JobConstraints;
+ /** The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. */
+ poolInfo?: PoolInformation;
+ /**
+ * The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ *
+ * Possible values: noaction, terminatejob
+ */
+ onAllTasksComplete?: string;
+ /** A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. */
+ metadata?: Array;
+}
+
+/** Options for disabling an Azure Batch Job. */
+export interface BatchJobDisableOptions {
 /**
 * What to do with active Tasks associated with the Job.
 *
@@ -2125,48 +1133,77 @@ export interface BatchJobDisableParameters {
 disableTasks: string;
 }

-/** Options when terminating a Job. */
-export interface BatchJobTerminateParameters {
- /**
- * The text you want to appear as the Job's TerminateReason. The default is
- * 'UserTerminate'.
- */
+/** Options for terminating an Azure Batch Job. */
+export interface BatchJobTerminateOptions {
+ /** The text you want to appear as the Job's TerminateReason. The default is 'UserTerminate'. */
 terminateReason?: string;
 }
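For orientation, a minimal TypeScript sketch of how the two small option shapes above are used; the "./models" import path is assumed for illustration, and 'requeue' is one of the service's accepted disableTasks values (alongside 'terminate' and 'wait'):

import type {
  BatchJobDisableOptions,
  BatchJobTerminateOptions
} from "./models";

// Requeue the Job's active Tasks so they run again when the Job is re-enabled.
const disableOptions: BatchJobDisableOptions = { disableTasks: "requeue" };

// Supply a custom TerminateReason instead of the default 'UserTerminate'.
const terminateOptions: BatchJobTerminateOptions = {
  terminateReason: "NightlyRunSuperseded"
};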
-/** Contains information about the container which a Task is executing. */
-export interface TaskContainerExecutionInformation {
- /** The ID of the container. */
- containerId?: string;
+/** Options for creating an Azure Batch Job. */
+export interface BatchJobCreateOptions {
+ /** A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
+ id: string;
+ /** The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
+ displayName?: string;
+ /** Whether Tasks in the Job can define dependencies on each other. The default is false. */
+ usesTaskDependencies?: boolean;
+ /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */
+ priority?: number;
+ /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ allowTaskPreemption?: boolean;
+ /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ maxParallelTasks?: number;
+ /** The execution constraints for the Job. */
+ constraints?: JobConstraints;
+ /** Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and can perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. */
+ jobManagerTask?: JobManagerTask;
+ /** The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */
+ jobPreparationTask?: JobPreparationTask;
+ /** The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. */
+ jobReleaseTask?: JobReleaseTask;
+ /** The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */
+ commonEnvironmentSettings?: Array;
+ /** The Pool on which the Batch service runs the Job's Tasks. */
+ poolInfo: PoolInformation;
 /**
- * This is the state of the container according to the Docker service. It is
- * equivalent to the status field returned by "docker inspect".
+ * The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction.
+ *
+ * Possible values: noaction, terminatejob
 */
- state?: string;
+ onAllTasksComplete?: string;
 /**
- * This is the detailed error string from the Docker service, if available. It is
- * equivalent to the error field returned by "docker inspect".
- */
+ * The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
+ *
+ * Possible values: noaction, performexitoptionsjobaction
+ */
+ onTaskFailure?: string;
+ /** The network configuration for the Job. */
+ networkConfiguration?: JobNetworkConfiguration;
+ /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
+ metadata?: Array;
+}
+
+/** Contains information about the container which a Task is executing. */
+export interface TaskContainerExecutionInformation {
+ /** The ID of the container. */
+ containerId?: string;
+ /** The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect". */
+ state?: string;
+ /** Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect". */
 error?: string;
 }
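BatchJobCreateOptions above is the payload for adding a Job. A minimal sketch follows; the import path is assumed, and poolId is assumed to be the PoolInformation field for targeting an existing Pool:

import type { BatchJobCreateOptions } from "./models";

const newJob: BatchJobCreateOptions = {
  id: "nightly-etl", // at most 64 chars; case-insensitive unique within the Account
  priority: 100, // -1000..1000; default 0
  poolInfo: { poolId: "etl-pool" },
  // With no Job Manager Task, start as noaction and switch to terminatejob
  // only after all Tasks have been added (see onAllTasksComplete above).
  onAllTasksComplete: "noaction"
};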
 /** Information about a Task failure. */
 export interface TaskFailureInformation {
 /**
- * The category of the error.
+ * The category of the Task error.
 *
 * Possible values: usererror, servererror
 */
 category: string;
- /**
- * An identifier for the Task error. Codes are invariant and are intended to be
- * consumed programmatically.
- */
+ /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */
 code?: string;
- /**
- * A message describing the Task error, intended to be suitable for display in a
- * user interface.
- */
+ /** A message describing the Task error, intended to be suitable for display in a user interface. */
 message?: string;
 /** A list of additional details related to the error. */
 details?: Array;
 }

@@ -2176,43 +1213,30 @@ export interface TaskFailureInformation {
 * A Certificate that can be installed on Compute Nodes and can be used to
 * authenticate operations on the machine.
 */
-export interface Certificate {
- /**
- * The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex
- * digits.
- */
- thumbprint?: string;
- /** The algorithm used to derive the thumbprint. */
- thumbprintAlgorithm?: string;
+export interface BatchCertificate {
+ /** The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). */
+ thumbprint: string;
+ /** The algorithm used to derive the thumbprint. This must be sha1. */
+ thumbprintAlgorithm: string;
 /** The base64-encoded contents of the Certificate. The maximum size is 10KB. */
- data?: string;
+ data: string;
 /**
 * The format of the Certificate data.
 *
 * Possible values: pfx, cer
 */
 certificateFormat?: string;
- /** This must be omitted if the Certificate format is cer. */
+ /** The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. */
 password?: string;
 }

 /** An error encountered by the Batch service when deleting a Certificate. */
 export interface DeleteCertificateError {
- /**
- * An identifier for the Certificate deletion error. Codes are invariant and are
- * intended to be consumed programmatically.
- */
+ /** An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. */
 code?: string;
- /**
- * A message describing the Certificate deletion error, intended to be suitable
- * for display in a user interface.
- */
+ /** A message describing the Certificate deletion error, intended to be suitable for display in a user interface. */
 message?: string;
- /**
- * This list includes details such as the active Pools and Compute Nodes
- * referencing this Certificate. However, if a large number of resources reference
- * the Certificate, the list contains only about the first hundred.
- */
+ /** A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. */
 values?: Array;
 }

@@ -2221,21 +1245,11 @@ export interface DeleteCertificateError {
 * specification used to create each Job.
 */
 export interface BatchJobSchedule {
- /** A string that uniquely identifies the schedule within the Account. */
- id?: string;
- /** The display name for the schedule. */
- displayName?: string;
- /**
- * All times are fixed respective to UTC and are not impacted by daylight saving
- * time.
- */
- schedule?: Schedule;
- /** Specifies details of the Jobs to be created on a schedule. */
- jobSpecification?: JobSpecification;
- /**
- * The Batch service does not assign any meaning to metadata; it is solely for the
- * use of user code.
- */
+ /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. */
+ schedule: Schedule;
+ /** The details of the Jobs to be created on this schedule. */
+ jobSpecification: JobSpecification;
+ /** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
 metadata?: Array;
 }

@@ -2244,137 +1258,55 @@ export interface BatchJobSchedule {
 * respective to UTC and are not impacted by daylight saving time.
 */
 export interface Schedule {
- /**
- * If you do not specify a doNotRunUntil time, the schedule becomes ready to
- * create Jobs immediately.
- */
+ /** The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately. */
 doNotRunUntil?: Date | string;
- /**
- * If you do not specify a doNotRunAfter time, and you are creating a recurring
- * Job Schedule, the Job Schedule will remain active until you explicitly
- * terminate it.
- */
+ /** A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it. */
 doNotRunAfter?: Date | string;
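The two timestamp fields above combine with startWindow and recurrenceInterval, declared next, as in this sketch; the import path is assumed, and the duration strings are ISO 8601 per the Batch REST conventions for these fields:

import type { Schedule } from "./models";

const schedule: Schedule = {
  doNotRunUntil: "2024-06-01T00:00:00Z", // ready to create Jobs from this time
  startWindow: "PT10M", // skip a recurrence if its Job cannot be created within 10 minutes
  recurrenceInterval: "PT1H" // at most one new Job per hour
};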
- /**
- * If a Job is not created within the startWindow interval, then the 'opportunity'
- * is lost; no Job will be created until the next recurrence of the schedule. If
- * the schedule is recurring, and the startWindow is longer than the recurrence
- * interval, then this is equivalent to an infinite startWindow, because the Job
- * that is 'due' in one recurrenceInterval is not carried forward into the next
- * recurrence interval. The default is infinite. The minimum value is 1 minute. If
- * you specify a lower value, the Batch service rejects the schedule with an
- * error; if you are calling the REST API directly, the HTTP status code is 400
- * (Bad Request).
- */
+ /** The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
 startWindow?: string;
- /**
- * Because a Job Schedule can have at most one active Job under it at any given
- * time, if it is time to create a new Job under a Job Schedule, but the previous
- * Job is still running, the Batch service will not create the new Job until the
- * previous Job finishes. If the previous Job does not finish within the
- * startWindow period of the new recurrenceInterval, then no new Job will be
- * scheduled for that interval. For recurring Jobs, you should normally specify a
- * jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you
- * will need an external process to monitor when Jobs are created, add Tasks to
- * the Jobs and terminate the Jobs ready for the next recurrence. The default is
- * that the schedule does not recur: one Job is created, within the startWindow
- * after the doNotRunUntil time, and the schedule is complete as soon as that Job
- * finishes. The minimum value is 1 minute. If you specify a lower value, the
- * Batch service rejects the schedule with an error; if you are calling the REST
- * API directly, the HTTP status code is 400 (Bad Request).
- */
+ /** The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time. If it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
 recurrenceInterval?: string;
 }

 /** Specifies details of the Jobs to be created on a schedule. */
 export interface JobSpecification {
- /**
- * Priority values can range from -1000 to 1000, with -1000 being the lowest
- * priority and 1000 being the highest priority. The default value is 0. This
- * priority is used as the default for all Jobs under the Job Schedule. You can
- * update a Job's priority after it has been created using by using the update Job
- * API.
- */
+ /** The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created by using the update Job API. */
 priority?: number;
- /**
- * If the value is set to True, other high priority jobs submitted to the system
- * will take precedence and will be able requeue tasks from this job. You can
- * update a job's allowTaskPreemption after it has been created using the update
- * job API.
- */
+ /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
 allowTaskPreemption?: boolean;
- /**
- * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not
- * specified, the default value is -1, which means there's no limit to the number
- * of tasks that can be run at once. You can update a job's maxParallelTasks after
- * it has been created using the update job API.
- */
+ /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
 maxParallelTasks?: number;
- /**
- * The name need not be unique and can contain any Unicode characters up to a
- * maximum length of 1024.
- */
+ /** The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
 displayName?: string;
- /**
- * Whether Tasks in the Job can define dependencies on each other. The default is
- * false.
- */
+ /** Whether Tasks in the Job can define dependencies on each other. The default is false. */
 usesTaskDependencies?: boolean;
 /**
- * Note that if a Job contains no Tasks, then all Tasks are considered complete.
- * This option is therefore most commonly used with a Job Manager task; if you
- * want to use automatic Job termination without a Job Manager, you should
- * initially set onAllTasksComplete to noaction and update the Job properties to
- * set onAllTasksComplete to terminatejob once you have finished adding Tasks. The
- * default is noaction.
+ * The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction.
 *
 * Possible values: noaction, terminatejob
 */
 onAllTasksComplete?: string;
 /**
- * The default is noaction.
+ * The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
 *
 * Possible values: noaction, performexitoptionsjobaction
 */
 onTaskFailure?: string;
 /** The network configuration for the Job. */
 networkConfiguration?: JobNetworkConfiguration;
- /** The execution constraints for a Job. */
+ /** The execution constraints for Jobs created under this schedule. */
 constraints?: JobConstraints;
- /**
- * If the Job does not specify a Job Manager Task, the user must explicitly add
- * Tasks to the Job using the Task API. If the Job does specify a Job Manager
- * Task, the Batch service creates the Job Manager Task when the Job is created,
- * and will try to schedule the Job Manager Task before scheduling other Tasks in
- * the Job.
- */
+ /** The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. */
 jobManagerTask?: JobManagerTask;
- /**
- * If a Job has a Job Preparation Task, the Batch service will run the Job
- * Preparation Task on a Node before starting any Tasks of that Job on that
- * Compute Node.
- */
+ /** The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */
 jobPreparationTask?: JobPreparationTask;
- /**
- * The primary purpose of the Job Release Task is to undo changes to Nodes made by
- * the Job Preparation Task. Example activities include deleting local files, or
- * shutting down services that were started as part of Job preparation. A Job
- * Release Task cannot be specified without also specifying a Job Preparation Task
- * for the Job. The Batch service runs the Job Release Task on the Compute Nodes
- * that have run the Job Preparation Task.
- */
+ /** The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task. */
 jobReleaseTask?: JobReleaseTask;
- /**
- * Individual Tasks can override an environment setting specified here by
- * specifying the same setting name with a different value.
- */
+ /** A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */
 commonEnvironmentSettings?: Array;
- /** Specifies how a Job should be assigned to a Pool. */
+ /** The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. */
 poolInfo: PoolInformation;
- /**
- * The Batch service does not assign any meaning to metadata; it is solely for the
- * use of user code.
- */
+ /** A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
 metadata?: Array;
 }
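A minimal JobSpecification sketch tying the pieces above together; the import path is assumed, and poolId is assumed to be the PoolInformation field for an existing Pool:

import type { JobSpecification } from "./models";

const jobSpec: JobSpecification = {
  displayName: "hourly-report",
  priority: 0,
  poolInfo: { poolId: "report-pool" },
  // Recurring schedules normally carry a jobManagerTask (see above); without
  // one, an external process must add Tasks to each Job the schedule creates.
  onAllTasksComplete: "noaction"
};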
@@ -2383,18 +1315,11 @@ export interface JobSpecification {
 * Schedule.
 */
 export interface JobScheduleExecutionInformation {
- /**
- * This property is meaningful only if the schedule is in the active state when
- * the time comes around. For example, if the schedule is disabled, no Job will be
- * created at nextRunTime unless the Job is enabled before then.
- */
+ /** The next time at which a Job will be created under this schedule. This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before then. */
 nextRunTime?: Date | string;
- /**
- * This property is present only if the at least one Job has run under the
- * schedule.
- */
+ /** Information about the most recent Job under the Job Schedule. This property is present only if at least one Job has run under the schedule. */
 recentJob?: RecentJob;
- /** This property is set only if the Job Schedule is in the completed state. */
+ /** The time at which the schedule ended. This property is set only if the Job Schedule is in the completed state. */
 endTime?: Date | string;
 }

@@ -2412,214 +1337,103 @@ export interface JobScheduleStatistics {
 url: string;
 /** The start time of the time range covered by the statistics. */
 startTime: Date | string;
- /**
- * The time at which the statistics were last updated. All statistics are limited
- * to the range between startTime and lastUpdateTime.
- */
+ /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */
 lastUpdateTime: Date | string;
- /**
- * The total user mode CPU time (summed across all cores and all Compute Nodes)
- * consumed by all Tasks in all Jobs created under the schedule.
- */
+ /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. */
 userCPUTime: string;
- /**
- * The total kernel mode CPU time (summed across all cores and all Compute Nodes)
- * consumed by all Tasks in all Jobs created under the schedule.
- */
+ /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. */
 kernelCPUTime: string;
- /**
- * The wall clock time is the elapsed time from when the Task started running on a
- * Compute Node to when it finished (or to the last time the statistics were
- * updated, if the Task had not finished by then). If a Task was retried, this
- * includes the wall clock time of all the Task retries.
- */
+ /** The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */
 wallClockTime: string;
- /**
- * The total number of disk read operations made by all Tasks in all Jobs created
- * under the schedule.
- */
+ /** The total number of disk read operations made by all Tasks in all Jobs created under the schedule. */
 readIOps: number;
- /**
- * The total number of disk write operations made by all Tasks in all Jobs created
- * under the schedule.
- */
+ /** The total number of disk write operations made by all Tasks in all Jobs created under the schedule. */
 writeIOps: number;
- /**
- * The total gibibytes read from disk by all Tasks in all Jobs created under the
- * schedule.
- */
+ /** The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. */
 readIOGiB: number;
- /**
- * The total gibibytes written to disk by all Tasks in all Jobs created under the
- * schedule.
- */
+ /** The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. */
 writeIOGiB: number;
- /**
- * The total number of Tasks successfully completed during the given time range in
- * Jobs created under the schedule. A Task completes successfully if it returns
- * exit code 0.
- */
+ /** The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. */
 numSucceededTasks: number;
- /**
- * The total number of Tasks that failed during the given time range in Jobs
- * created under the schedule. A Task fails if it exhausts its maximum retry count
- * without returning exit code 0.
- */
+ /** The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. */
 numFailedTasks: number;
- /**
- * The total number of retries during the given time range on all Tasks in all
- * Jobs created under the schedule.
- */
+ /** The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. */
 numTaskRetries: number;
- /**
- * This value is only reported in the Account lifetime statistics; it is not
- * included in the Job statistics.
- */
+ /** The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */
 waitTime: string;
 }

-/**
- * Batch will retry Tasks when a recovery operation is triggered on a Node.
- * Examples of recovery operations include (but are not limited to) when an
- * unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
- * Retries due to recovery operations are independent of and are not counted
- * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
- * retry due to a recovery operation may occur. Because of this, all Tasks should
- * be idempotent. This means Tasks need to tolerate being interrupted and
- * restarted without causing any corruption or duplicate data. The best practice
- * for long running Tasks is to use some form of checkpointing.
- */
-export interface BatchTask {
- /**
- * The ID can contain any combination of alphanumeric characters including hyphens
- * and underscores, and cannot contain more than 64 characters.
- */
- id?: string;
- /**
- * The display name need not be unique and can contain any Unicode characters up
- * to a maximum length of 1024.
- */
+/** Options for updating an Azure Batch Job Schedule. */
+export interface BatchJobScheduleUpdateOptions {
+ /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged. */
+ schedule?: Schedule;
+ /** The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification. */
+ jobSpecification?: JobSpecification;
+ /** A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. */
+ metadata?: Array;
+}
+
+/** Options for creating an Azure Batch Job Schedule */
+export interface BatchJobScheduleCreateOptions {
+ /** A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
+ id: string;
+ /** The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
+ displayName?: string;
+ /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. */
+ schedule: Schedule;
+ /** The details of the Jobs to be created on this schedule. */
+ jobSpecification: JobSpecification;
+ /** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
+ metadata?: Array;
+}
+
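Putting the schedule types together, a hedged sketch of a create payload; the import path is assumed, and poolId is assumed to be the PoolInformation field for an existing Pool:

import type { BatchJobScheduleCreateOptions } from "./models";

const scheduleCreate: BatchJobScheduleCreateOptions = {
  id: "hourly-report-schedule",
  schedule: { recurrenceInterval: "PT1H" }, // ISO 8601 duration
  jobSpecification: {
    poolInfo: { poolId: "report-pool" }
  }
};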
+/** Options for creating an Azure Batch Task. */
+export interface BatchTaskCreateOptions {
+ /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). */
+ id: string;
+ /** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
+ displayName?: string;
 /** How the Batch service should respond when the Task completes. */
 exitConditions?: ExitConditions;
- /**
- * For multi-instance Tasks, the command line is executed as the primary Task,
- * after the primary Task and all subtasks have finished executing the
- * coordination command line. The command line does not run under a shell, and
- * therefore cannot take advantage of shell features such as environment variable
- * expansion. If you want to take advantage of such features, you should invoke
- * the shell in the command line, for example using "cmd /c MyCommand" in
- * Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to
- * file paths, it should use a relative path (relative to the Task working
- * directory), or use the Batch provided environment variable
- * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
- */
+ /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */
+ commandLine: string;
+ /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
 containerSettings?: TaskContainerSettings;
- /**
- * For multi-instance Tasks, the resource files will only be downloaded to the
- * Compute Node on which the primary Task is executed. There is a maximum size for
- * the list of resource files. When the max size is exceeded, the request will
- * fail and the response error code will be RequestEntityTooLarge. If this occurs,
- * the collection of ResourceFiles must be reduced in size. This can be achieved
- * using .zip files, Application Packages, or Docker Containers.
- */
+ /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */
 resourceFiles?: Array;
- /**
- * For multi-instance Tasks, the files will only be uploaded from the Compute Node
- * on which the primary Task is executed.
- */
+ /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */
 outputFiles?: Array;
 /** A list of environment variable settings for the Task. */
 environmentSettings?: Array;
- /**
- * A locality hint that can be used by the Batch service to select a Compute Node
- * on which to start a Task.
- */
+ /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */
 affinityInfo?: AffinityInformation;
- /** Execution constraints to apply to a Task. */
+ /** The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. */
 constraints?: TaskConstraints;
- /**
- * The default is 1. A Task can only be scheduled to run on a compute node if the
- * node has enough free scheduling slots available. For multi-instance Tasks, this
- * must be 1.
- */
+ /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */
 requiredSlots?: number;
- /** If omitted, the Task runs as a non-administrative user unique to the Task. */
+ /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */
 userIdentity?: UserIdentity;
- /**
- * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case,
- * if any of the subtasks fail (for example due to exiting with a non-zero exit
- * code) the entire multi-instance Task fails. The multi-instance Task is then
- * terminated and retried, up to its retry limit.
- */
+ /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */
 multiInstanceSettings?: MultiInstanceSettings;
- /**
- * This Task will not be scheduled until all Tasks that it depends on have
- * completed successfully. If any of those Tasks fail and exhaust their retry
- * counts, this Task will never be scheduled.
- */
+ /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. */
 dependsOn?: TaskDependencies;
- /**
- * Application packages are downloaded and deployed to a shared directory, not the
- * Task working directory. Therefore, if a referenced package is already on the
- * Node, and is up to date, then it is not re-downloaded; the existing copy on the
- * Compute Node is used. If a referenced Package cannot be installed, for example
- * because the package has been deleted or because download failed, the Task
- * fails.
- */
+ /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */
 applicationPackageReferences?: Array;
- /**
- * If this property is set, the Batch service provides the Task with an
- * authentication token which can be used to authenticate Batch service operations
- * without requiring an Account access key. The token is provided via the
- * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the
- * Task can carry out using the token depend on the settings. For example, a Task
- * can request Job permissions in order to add other Tasks to the Job, or check
- * the status of the Job or of other Tasks under the Job.
- */
+ /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */
 authenticationTokenSettings?: AuthenticationTokenSettings;
 }

 /** Specifies how the Batch service should respond when the Task completes. */
 export interface ExitConditions {
- /**
- * A list of individual Task exit codes and how the Batch service should respond
- * to them.
- */
+ /** A list of individual Task exit codes and how the Batch service should respond to them. */
 exitCodes?: Array;
- /**
- * A list of Task exit code ranges and how the Batch service should respond to
- * them.
- */
+ /** A list of Task exit code ranges and how the Batch service should respond to them. */
 exitCodeRanges?: Array;
- /** Specifies how the Batch service responds to a particular exit condition. */
+ /** How the Batch service should respond if the Task fails to start due to an error. */
 preProcessingError?: ExitOptions;
- /**
- * If the Task exited with an exit code that was specified via exitCodes or
- * exitCodeRanges, and then encountered a file upload error, then the action
- * specified by the exit code takes precedence.
- */
+ /** How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. */
 fileUploadError?: ExitOptions;
- /**
- * This value is used if the Task exits with any nonzero exit code not listed in
- * the exitCodes or exitCodeRanges collection, with a pre-processing error if the
- * preProcessingError property is not present, or with a file upload error if the
- * fileUploadError property is not present. If you want non-default behavior on
- * exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges
- * collection.
- */
+ /** How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. */
 default?: ExitOptions;
 }

@@ -2630,26 +1444,20 @@ export interface ExitConditions {
 export interface ExitCodeMapping {
 /** A process exit code. */
 code: number;
- /** Specifies how the Batch service responds to a particular exit condition. */
+ /** How the Batch service should respond if the Task exits with this exit code. */
 exitOptions: ExitOptions;
 }
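The Task-creation and exit-handling types above compose as in this sketch; the import path is assumed, and note that jobAction only takes effect when the Job's onTaskFailure is performexitoptionsjobaction (the jobAction values are listed under ExitOptions, defined next):

import type { BatchTaskCreateOptions } from "./models";

const task: BatchTaskCreateOptions = {
  id: "process-shard-01",
  // Invoke a shell explicitly if you need environment-variable expansion.
  commandLine: '/bin/sh -c "python3 process.py"',
  exitConditions: {
    // Exit code 1 disables the Job.
    exitCodes: [{ code: 1, exitOptions: { jobAction: "disable" } }],
    // Any other nonzero exit code terminates the Job.
    default: { jobAction: "terminate" }
  }
};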
*/ export interface ExitOptions { /** - * The default is none for exit code 0 and terminate for all other exit - * conditions. If the Job's onTaskFailed property is noaction, then specifying - * this property returns an error and the add Task request fails with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). + * An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). * * Possible values: none, disable, terminate */ jobAction?: string; /** - * Possible values are 'satisfy' (allowing dependent tasks to progress) and - * 'block' (dependent tasks continue to wait). Batch does not yet support - * cancellation of dependent tasks. + * An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. * * Possible values: satisfy, block */ @@ -2665,7 +1473,7 @@ export interface ExitCodeRangeMapping { start: number; /** The last exit code in the range. */ end: number; - /** Specifies how the Batch service responds to a particular exit condition. */ + /** How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). */ exitOptions: ExitOptions; } @@ -2674,70 +1482,86 @@ export interface ExitCodeRangeMapping { * on which to start a Task. */ export interface AffinityInformation { - /** - * You can pass the affinityId of a Node to indicate that this Task needs to run - * on that Compute Node. Note that this is just a soft affinity. If the target - * Compute Node is busy or unavailable at the time the Task is scheduled, then the - * Task will be scheduled elsewhere. - */ + /** An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ affinityId: string; } +/** + * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, + * if any of the subtasks fail (for example due to exiting with a non-zero exit + * code) the entire multi-instance Task fails. The multi-instance Task is then + * terminated and retried, up to its retry limit. + */ +export interface MultiInstanceSettings { + /** The number of Compute Nodes required by the Task. If omitted, the default is 1. */ + numberOfInstances?: number; + /** The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. */ + coordinationCommandLine: string; + /** A list of files that the Batch service will download before running the coordination command line. 
+/**
+ * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case,
+ * if any of the subtasks fail (for example due to exiting with a non-zero exit
+ * code) the entire multi-instance Task fails. The multi-instance Task is then
+ * terminated and retried, up to its retry limit.
+ */
+export interface MultiInstanceSettings {
+  /** The number of Compute Nodes required by the Task. If omitted, the default is 1. */
+  numberOfInstances?: number;
+  /** The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. */
+  coordinationCommandLine: string;
+  /** A list of files that the Batch service will download before running the coordination command line. The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */
+  commonResourceFiles?: Array;
+}
+
+/**
+ * Specifies any dependencies of a Task. Any Task that is explicitly specified or
+ * within a dependency range must complete before the dependent Task will be
+ * scheduled.
+ */
+export interface TaskDependencies {
+  /** The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead. */
+  taskIds?: string[];
+  /** The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled. */
+  taskIdRanges?: Array;
+}
+
+/**
+ * The start and end of the range are inclusive. For example, if a range has start
+ * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'.
+ */
+export interface TaskIdRange {
+  /** The first Task ID in the range. */
+  start: number;
+  /** The last Task ID in the range. */
+  end: number;
+}
+
+/**
+ * Batch will retry Tasks when a recovery operation is triggered on a Node.
+ * Examples of recovery operations include (but are not limited to) when an
+ * unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
+ * Retries due to recovery operations are independent of and are not counted
+ * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
+ * retry due to a recovery operation may occur. Because of this, all Tasks should
+ * be idempotent. This means Tasks need to tolerate being interrupted and
+ * restarted without causing any corruption or duplicate data. The best practice
+ * for long running Tasks is to use some form of checkpointing.
+ */
+export interface BatchTask {
+  /** The execution constraints that apply to this Task. */
+  constraints?: TaskConstraints;
+}
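
The dependency models above compose like so; a minimal sketch with hypothetical Task IDs (import path illustrative, not part of this diff):

import type { TaskDependencies } from "./models";

// Run only after tasks "setup" and "fetch" plus the whole range 1-100
// have completed successfully.
const dependsOn: TaskDependencies = {
  taskIds: ["setup", "fetch"],
  taskIdRanges: [{ start: 1, end: 100 }],
};
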

 /** Information about the execution of a Task. */
 export interface TaskExecutionInformation {
-  /**
-   * 'Running' corresponds to the running state, so if the Task specifies resource
-   * files or Packages, then the start time reflects the time at which the Task
-   * started downloading or deploying these. If the Task has been restarted or
-   * retried, this is the most recent time at which the Task started running. This
-   * property is present only for Tasks that are in the running or completed state.
-   */
+  /** The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state. */
   startTime?: Date | string;
-  /** This property is set only if the Task is in the Completed state. */
+  /** The time at which the Task completed. This property is set only if the Task is in the Completed state. */
   endTime?: Date | string;
-  /**
-   * This property is set only if the Task is in the completed state. In general,
-   * the exit code for a process reflects the specific convention implemented by the
-   * application developer for that process. If you use the exit code value to make
-   * decisions in your code, be sure that you know the exit code convention used by
-   * the application process. However, if the Batch service terminates the Task (due
-   * to timeout, or user termination via the API) you may see an operating
-   * system-defined exit code.
-   */
+  /** The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */
   exitCode?: number;
-  /** This property is set only if the Task runs in a container context. */
+  /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */
   containerInfo?: TaskContainerExecutionInformation;
-  /**
-   * This property is set only if the Task is in the completed state and encountered
-   * a failure.
-   */
+  /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */
   failureInfo?: TaskFailureInformation;
-  /**
-   * Task application failures (non-zero exit code) are retried, pre-processing
-   * errors (the Task could not be run) and file upload errors are not retried. The
-   * Batch service will retry the Task up to the limit specified by the constraints.
-   */
+  /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */
   retryCount: number;
-  /**
-   * This element is present only if the Task was retried (i.e. retryCount is
-   * nonzero). If present, this is typically the same as startTime, but may be
-   * different if the Task has been restarted for reasons other than retry; for
-   * example, if the Compute Node was rebooted during a retry, then the startTime is
-   * updated but the lastRetryTime is not.
-   */
+  /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
*/ lastRetryTime?: Date | string; - /** - * When the user removes Compute Nodes from a Pool (by resizing/shrinking the - * pool) or when the Job is being disabled, the user can specify that running - * Tasks on the Compute Nodes be requeued for execution. This count tracks how - * many times the Task has been requeued for these reasons. - */ + /** The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons. */ requeueCount: number; - /** This property is set only if the requeueCount is nonzero. */ + /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ lastRequeueTime?: Date | string; /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. * * Possible values: success, failure */ @@ -2745,11 +1569,8 @@ export interface TaskExecutionInformation { } /** Information about the Compute Node on which a Task ran. */ -export interface ComputeNodeInformation { - /** - * An identifier for the Node on which the Task ran, which can be passed when - * adding a Task to request that the Task be scheduled on this Compute Node. - */ +export interface BatchNodeInformation { + /** An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. */ affinityId?: string; /** The URL of the Compute Node on which the Task ran. */ nodeUrl?: string; @@ -2763,62 +1584,19 @@ export interface ComputeNodeInformation { taskRootDirectoryUrl?: string; } -/** - * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, - * if any of the subtasks fail (for example due to exiting with a non-zero exit - * code) the entire multi-instance Task fails. The multi-instance Task is then - * terminated and retried, up to its retry limit. - */ -export interface MultiInstanceSettings { - /** If omitted, the default is 1. */ - numberOfInstances?: number; - /** - * A typical coordination command line launches a background service and verifies - * that the service is ready to process inter-node messages. - */ - coordinationCommandLine: string; - /** - * The difference between common resource files and Task resource files is that - * common resource files are downloaded for all subtasks including the primary, - * whereas Task resource files are downloaded only for the primary. Also note that - * these resource files are not downloaded to the Task working directory, but - * instead are downloaded to the Task root directory (one directory above the - * working directory). There is a maximum size for the list of resource files. - * When the max size is exceeded, the request will fail and the response error - * code will be RequestEntityTooLarge. If this occurs, the collection of - * ResourceFiles must be reduced in size. This can be achieved using .zip files, - * Application Packages, or Docker Containers. - */ - commonResourceFiles?: Array; -} - /** Resource usage statistics for a Task. 
*/ export interface TaskStatistics { /** The URL of the statistics. */ url: string; /** The start time of the time range covered by the statistics. */ startTime: Date | string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ lastUpdateTime: Date | string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by the Task. - */ + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */ userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by the Task. - */ + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */ kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on a - * Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If the Task was retried, this - * includes the wall clock time of all the Task retries. - */ + /** The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. */ wallClockTime: string; /** The total number of disk read operations made by the Task. */ readIOps: number; @@ -2828,164 +1606,78 @@ export interface TaskStatistics { readIOGiB: number; /** The total gibibytes written to disk by the Task. */ writeIOGiB: number; - /** - * The total wait time of the Task. The wait time for a Task is defined as the - * elapsed time between the creation of the Task and the start of Task execution. - * (If the Task is retried due to failures, the wait time is the time to the most - * recent Task execution.) - */ + /** The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). */ waitTime: string; } -/** - * Specifies any dependencies of a Task. Any Task that is explicitly specified or - * within a dependency range must complete before the dependant Task will be - * scheduled. - */ -export interface TaskDependencies { - /** - * The taskIds collection is limited to 64000 characters total (i.e. the combined - * length of all Task IDs). If the taskIds collection exceeds the maximum length, - * the Add Task request fails with error code TaskDependencyListTooLong. In this - * case consider using Task ID ranges instead. - */ - taskIds?: string[]; - /** - * The list of Task ID ranges that this Task depends on. All Tasks in all ranges - * must complete successfully before the dependent Task can be scheduled. - */ - taskIdRanges?: Array; -} - -/** - * The start and end of the range are inclusive. For example, if a range has start - * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. - */ -export interface TaskIdRange { - /** The first Task ID in the range. */ - start: number; - /** The last Task ID in the range. 
*/ - end: number; -} - /** A collection of Azure Batch Tasks to add. */ export interface BatchTaskCollection { - /** - * The total serialized size of this collection must be less than 1MB. If it is - * greater than 1MB (for example if each Task has 100's of resource files or - * environment variables), the request will fail with code 'RequestBodyTooLarge' - * and should be retried again with fewer Tasks. - */ - value: Array; + /** The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. */ + value: Array; } -/** A user Account for RDP or SSH access on a Compute Node. */ -export interface ComputeNodeUser { +/** Options for creating a user account for RDP or SSH access on an Azure Batch Compute Node. */ +export interface BatchNodeUserCreateOptions { /** The user name of the Account. */ name: string; - /** The default value is false. */ + /** Whether the Account should be an administrator on the Compute Node. The default value is false. */ isAdmin?: boolean; - /** - * If omitted, the default is 1 day from the current time. For Linux Compute - * Nodes, the expiryTime has a precision up to a day. - */ + /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ expiryTime?: Date | string; - /** - * The password is required for Windows Compute Nodes (those created with - * 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' - * using a Windows Image reference). For Linux Compute Nodes, the password can - * optionally be specified along with the sshPublicKey property. - */ + /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ password?: string; - /** - * The public key should be compatible with OpenSSH encoding and should be base 64 - * encoded. This property can be specified only for Linux Compute Nodes. If this - * is specified for a Windows Compute Node, then the Batch service rejects the - * request; if you are calling the REST API directly, the HTTP status code is 400 - * (Bad Request). - */ + /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ sshPublicKey?: string; } -/** The set of changes to be made to a user Account on a Compute Node. */ -export interface NodeUpdateUserParameters { - /** - * The password is required for Windows Compute Nodes (those created with - * 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' - * using a Windows Image reference). For Linux Compute Nodes, the password can - * optionally be specified along with the sshPublicKey property. If omitted, any - * existing password is removed. 
- */ +/** Options for updating a user account for RDP or SSH access on an Azure Batch Compute Node. */ +export interface BatchNodeUserUpdateOptions { + /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ password?: string; - /** - * If omitted, the default is 1 day from the current time. For Linux Compute - * Nodes, the expiryTime has a precision up to a day. - */ + /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ expiryTime?: Date | string; - /** - * The public key should be compatible with OpenSSH encoding and should be base 64 - * encoded. This property can be specified only for Linux Compute Nodes. If this - * is specified for a Windows Compute Node, then the Batch service rejects the - * request; if you are calling the REST API directly, the HTTP status code is 400 - * (Bad Request). If omitted, any existing SSH public key is removed. - */ + /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. */ sshPublicKey?: string; } -/** Options for rebooting a Compute Node. */ -export interface NodeRebootParameters { +/** Options for rebooting an Azure Batch Compute Node. */ +export interface NodeRebootOptions { /** - * The default value is requeue. + * When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. * * Possible values: requeue, terminate, taskcompletion, retaineddata */ nodeRebootOption?: string; } -/** Options for reimaging a Compute Node. */ -export interface NodeReimageParameters { +/** Options for reimaging an Azure Batch Compute Node. */ +export interface NodeReimageOptions { /** - * The default value is requeue. + * When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. * * Possible values: requeue, terminate, taskcompletion, retaineddata */ nodeReimageOption?: string; } -/** Options for disabling scheduling on a Compute Node. */ -export interface NodeDisableSchedulingParameters { +/** Options for disabling scheduling on an Azure Batch Compute Node. */ +export interface NodeDisableSchedulingOptions { /** - * The default value is requeue. + * What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. * * Possible values: requeue, terminate, taskcompletion */ nodeDisableSchedulingOption?: string; } -/** The Azure Batch service log files upload configuration for a Compute Node. */ -export interface UploadBatchServiceLogsConfiguration { - /** - * If a user assigned managed identity is not being used, the URL must include a - * Shared Access Signature (SAS) granting write permissions to the container. The - * SAS duration must allow enough time for the upload to finish. 
The start time - * for SAS is optional and recommended to not be specified. - */ +/** The Azure Batch service log files upload options for a Compute Node. */ +export interface UploadBatchServiceLogsOptions { + /** The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. */ containerUrl: string; - /** - * Any log file containing a log message in the time range will be uploaded. This - * means that the operation might retrieve more logs than have been requested - * since the entire log file is always uploaded, but the operation should not - * retrieve fewer logs than have been requested. - */ + /** The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. */ startTime: Date | string; - /** - * Any log file containing a log message in the time range will be uploaded. This - * means that the operation might retrieve more logs than have been requested - * since the entire log file is always uploaded, but the operation should not - * retrieve fewer logs than have been requested. If omitted, the default is to - * upload all logs available after the startTime. - */ + /** The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. */ endTime?: Date | string; - /** The identity must have write access to the Azure Blob Storage container. */ - identityReference?: object; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ + identityReference?: BatchNodeIdentityReference; } diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/outputModels.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/outputModels.ts index e81ab815a4..b9433c67f1 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/outputModels.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/outputModels.ts @@ -1,318 +1,87 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
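
For orientation, a minimal sketch of the renamed node-user and log-upload options from the models hunk above; the import path and literal values are illustrative, not part of this diff:

import type {
  BatchNodeUserCreateOptions,
  UploadBatchServiceLogsOptions,
} from "../models";

// A temporary admin account; expiry falls back to the service default
// (1 day) when omitted.
const user: BatchNodeUserCreateOptions = {
  name: "debug-user",
  isAdmin: true,
  sshPublicKey: "ssh-ed25519 AAAA... user@host", // Linux nodes only
};

// Upload the last hour of Batch service logs to a SAS-addressed container
// (placeholder URL; a real SAS token must grant write access).
const logUpload: UploadBatchServiceLogsOptions = {
  containerUrl: "https://myaccount.blob.core.windows.net/logs?sv=...",
  startTime: new Date(Date.now() - 60 * 60 * 1000),
};
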
-import { Paged } from "@azure/core-paging";
+/** Common request headers for listing applications in an Account. */
+export interface BatchApplicationListHeadersOutput
+  extends BatchClientRequestHeadersOutput {}
+
+/** Common request headers shared by Batch client operations. */
+export interface BatchClientRequestHeadersOutput {}
+
+/** Common path parameters for Task-related File operations. */
+export interface BatchTaskFileClientPathParametersOutput
+  extends BatchClientRequestHeadersOutput {}
+
+/** Common path parameters for Node-related File operations. */
+export interface BatchNodeFileClientPathParametersOutput
+  extends BatchClientRequestHeadersOutput {}

 /** The result of listing the applications available in an Account. */
 export interface ApplicationListResultOutput {
   /** The list of applications available in the Account. */
-  value?: Array<ApplicationOutput>;
+  value?: Array<BatchApplicationOutput>;
   /** The URL to get the next set of results. */
   "odata.nextLink"?: string;
 }

 /** Contains information about an application in an Azure Batch Account. */
-export interface ApplicationOutput {
+export interface BatchApplicationOutput {
   /** A string that uniquely identifies the application within the Account. */
-  readonly id: string;
+  id: string;
   /** The display name for the application. */
   displayName: string;
   /** The list of available versions of the application. */
   versions: string[];
 }

+/** An error response received from the Azure Batch service. */
+export interface BatchErrorOutput {
+  /** An identifier for the error. Codes are invariant and are intended to be consumed programmatically. */
+  code: string;
+  /** A message describing the error, intended to be suitable for display in a user interface. */
+  message?: ErrorMessageOutput;
+  /** A collection of key-value pairs containing additional details about the error. */
+  values?: Array;
+}
+
+/** An error message received in an Azure Batch error response. */
+export interface ErrorMessageOutput {
+  /** The language code of the error message. */
+  lang?: string;
+  /** The text of the message. */
+  value?: string;
+}
+
+/** An item of additional information included in an Azure Batch error response. */
+export interface BatchErrorDetailOutput {
+  /** An identifier specifying the meaning of the Value property. */
+  key?: string;
+  /** The additional information included with the error response. */
+  value?: string;
+}
+
+/** The result of listing the usage metrics for an Account. */
+export interface PoolListUsageMetricsResultOutput {
+  /** The Pool usage metrics data. */
+  value?: Array;
+  /** The URL to get the next set of results. */
+  "odata.nextLink"?: string;
+}
+
 /** Usage metrics for a Pool across an aggregation interval. */
 export interface PoolUsageMetricsOutput {
   /** The ID of the Pool whose metrics are aggregated in this entry. */
-  readonly poolId: string;
+  poolId: string;
   /** The start time of the aggregation interval covered by this entry. */
   startTime: string;
   /** The end time of the aggregation interval covered by this entry. */
   endTime: string;
-  /**
-   * For information about available sizes of virtual machines in Pools, see Choose
-   * a VM size for Compute Nodes in an Azure Batch Pool
-   * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
-   */
+  /** The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
*/ vmSize: string; /** The total core hours used in the Pool during this aggregation interval. */ totalCoreHours: number; } -/** Contains utilization and resource usage statistics for the lifetime of a Pool. */ -export interface PoolStatisticsOutput { - /** The URL for the statistics. */ - readonly url: string; - /** The start time of the time range covered by the statistics. */ - startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: string; - /** Statistics related to Pool usage information. */ - usageStats?: UsageStatisticsOutput; - /** Statistics related to resource consumption by Compute Nodes in a Pool. */ - resourceStats?: ResourceStatisticsOutput; -} - -/** Statistics related to Pool usage information. */ -export interface UsageStatisticsOutput { - /** The start time of the time range covered by the statistics. */ - startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: string; - /** - * The aggregated wall-clock time of the dedicated Compute Node cores being part - * of the Pool. - */ - dedicatedCoreTime: string; -} - -/** Statistics related to resource consumption by Compute Nodes in a Pool. */ -export interface ResourceStatisticsOutput { - /** The start time of the time range covered by the statistics. */ - startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: string; - /** - * The average CPU usage across all Compute Nodes in the Pool (percentage per - * node). - */ - avgCPUPercentage: number; - /** The average memory usage in GiB across all Compute Nodes in the Pool. */ - avgMemoryGiB: number; - /** The peak memory usage in GiB across all Compute Nodes in the Pool. */ - peakMemoryGiB: number; - /** The average used disk space in GiB across all Compute Nodes in the Pool. */ - avgDiskGiB: number; - /** The peak used disk space in GiB across all Compute Nodes in the Pool. */ - peakDiskGiB: number; - /** The total number of disk read operations across all Compute Nodes in the Pool. */ - diskReadIOps: number; - /** The total number of disk write operations across all Compute Nodes in the Pool. */ - diskWriteIOps: number; - /** - * The total amount of data in GiB of disk reads across all Compute Nodes in the - * Pool. - */ - diskReadGiB: number; - /** - * The total amount of data in GiB of disk writes across all Compute Nodes in the - * Pool. - */ - diskWriteGiB: number; - /** - * The total amount of data in GiB of network reads across all Compute Nodes in - * the Pool. - */ - networkReadGiB: number; - /** - * The total amount of data in GiB of network writes across all Compute Nodes in - * the Pool. - */ - networkWriteGiB: number; -} - -/** A Pool in the Azure Batch service. */ -export interface BatchPoolOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores, and cannot contain more than 64 characters. The ID is - * case-preserving and case-insensitive (that is, you may not have two IDs within - * an Account that differ only by case). - */ - id?: string; - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ - displayName?: string; - /** The URL of the Pool. 
*/ - readonly url?: string; - /** - * This is an opaque string. You can use it to detect whether the Pool has changed - * between requests. In particular, you can be pass the ETag when updating a Pool - * to specify that your changes should take effect only if nobody else has - * modified the Pool in the meantime. - */ - readonly eTag?: string; - /** - * This is the last time at which the Pool level data, such as the - * targetDedicatedNodes or enableAutoscale settings, changed. It does not factor - * in node-level changes such as a Compute Node changing state. - */ - readonly lastModified?: string; - /** The creation time of the Pool. */ - readonly creationTime?: string; - /** - * The current state of the Pool. - * - * Possible values: active, deleting - */ - readonly state?: string; - /** The time at which the Pool entered its current state. */ - readonly stateTransitionTime?: string; - /** - * Whether the Pool is resizing. - * - * Possible values: steady, resizing, stopping - */ - readonly allocationState?: string; - /** The time at which the Pool entered its current allocation state. */ - readonly allocationStateTransitionTime?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - */ - vmSize?: string; - /** - * This property and virtualMachineConfiguration are mutually exclusive and one of - * the properties must be specified. This property cannot be specified if the - * Batch Account was created with its poolAllocationMode property set to - * 'UserSubscription'. - */ - cloudServiceConfiguration?: CloudServiceConfigurationOutput; - /** - * This property and cloudServiceConfiguration are mutually exclusive and one of - * the properties must be specified. - */ - virtualMachineConfiguration?: VirtualMachineConfigurationOutput; - /** - * This is the timeout for the most recent resize operation. (The initial sizing - * when the Pool is created counts as a resize.) The default value is 15 minutes. - */ - resizeTimeout?: string; - /** - * This property is set only if one or more errors occurred during the last Pool - * resize, and only when the Pool allocationState is Steady. - */ - readonly resizeErrors?: Array; - /** The number of dedicated Compute Nodes currently in the Pool. */ - readonly currentDedicatedNodes?: number; - /** - * Spot/Low-priority Compute Nodes which have been preempted are included in this - * count. - */ - readonly currentLowPriorityNodes?: number; - /** The desired number of dedicated Compute Nodes in the Pool. */ - targetDedicatedNodes?: number; - /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ - targetLowPriorityNodes?: number; - /** - * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must - * be specified. If true, the autoScaleFormula property is required and the Pool - * automatically resizes according to the formula. The default value is false. - */ - enableAutoScale?: boolean; - /** - * This property is set only if the Pool automatically scales, i.e. - * enableAutoScale is true. - */ - autoScaleFormula?: string; - /** - * This property is set only if the Pool automatically scales, i.e. - * enableAutoScale is true. - */ - autoScaleEvaluationInterval?: string; - /** - * This property is set only if the Pool automatically scales, i.e. - * enableAutoScale is true. 
- */ - readonly autoScaleRun?: AutoScaleRunOutput; - /** - * This imposes restrictions on which Compute Nodes can be assigned to the Pool. - * Specifying this value can reduce the chance of the requested number of Compute - * Nodes to be allocated in the Pool. - */ - enableInterNodeCommunication?: boolean; - /** The network configuration for a Pool. */ - networkConfiguration?: NetworkConfigurationOutput; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ - startTask?: StartTaskOutput; - /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. - */ - certificateReferences?: Array; - /** - * Changes to Package references affect all new Nodes joining the Pool, but do not - * affect Compute Nodes that are already in the Pool until they are rebooted or - * reimaged. There is a maximum of 10 Package references on any given Pool. - */ - applicationPackageReferences?: Array; - /** - * The list of application licenses must be a subset of available Batch service - * application licenses. If a license is requested which is not supported, Pool - * creation will fail. - */ - applicationLicenses?: string[]; - /** - * The default value is 1. The maximum value is the smaller of 4 times the number - * of cores of the vmSize of the pool or 256. - */ - taskSlotsPerNode?: number; - /** If not specified, the default is spread. */ - taskSchedulingPolicy?: TaskSchedulingPolicyOutput; - /** The list of user Accounts to be created on each Compute Node in the Pool. */ - userAccounts?: Array; - /** A list of name-value pairs associated with the Pool as metadata. */ - metadata?: Array; - /** - * This property is populated only if the CloudPool was retrieved with an expand - * clause including the 'stats' attribute; otherwise it is null. The statistics - * may not be immediately available. The Batch service performs periodic roll-up - * of statistics. The typical delay is about 30 minutes. - */ - readonly stats?: PoolStatisticsOutput; - /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. 
*/ - mountConfiguration?: Array; - /** - * The list of user identities associated with the Batch pool. The user identity - * dictionary key references will be ARM resource ids in the form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - */ - readonly identity?: BatchPoolIdentityOutput; - /** - * If omitted, the default value is Default. - * - * Possible values: default, classic, simplified - */ - targetNodeCommunicationMode?: string; - /** - * Determines how a pool communicates with the Batch service. - * - * Possible values: default, classic, simplified - */ - readonly currentNodeCommunicationMode?: string; -} - /** * The configuration for Compute Nodes in a Pool based on the Azure Cloud Services * platform. @@ -332,10 +101,7 @@ export interface CloudServiceConfigurationOutput { * (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). */ osFamily: string; - /** - * The default value is * which specifies the latest operating system version for - * the specified OS family. - */ + /** The Azure Guest OS version to be installed on the virtual machines in the Pool. The default value is * which specifies the latest operating system version for the specified OS family. */ osVersion?: string; } @@ -344,39 +110,13 @@ export interface CloudServiceConfigurationOutput { * Machines infrastructure. */ export interface VirtualMachineConfigurationOutput { - /** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ + /** A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. */ imageReference: ImageReferenceOutput; - /** - * The Batch Compute Node agent is a program that runs on each Compute Node in the - * Pool, and provides the command-and-control interface between the Compute Node - * and the Batch service. There are different implementations of the Compute Node - * agent, known as SKUs, for different operating systems. You must specify a - * Compute Node agent SKU which matches the selected Image reference. To get the - * list of supported Compute Node agent SKUs along with their list of verified - * Image references, see the 'List supported Compute Node agent SKUs' operation. - */ + /** The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. */ nodeAgentSKUId: string; - /** - * This property must not be specified if the imageReference property specifies a - * Linux OS Image. - */ + /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. 
*/ windowsConfiguration?: WindowsConfigurationOutput; - /** - * This property must be specified if the Compute Nodes in the Pool need to have - * empty data disks attached to them. This cannot be updated. Each Compute Node - * gets its own disk (the disk is not a file share). Existing disks cannot be - * attached, each attached disk is empty. When the Compute Node is removed from - * the Pool, the disk and all data associated with it is also deleted. The disk is - * not formatted after being attached, it must be formatted before use - for more - * information see - * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux - * and - * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. - */ + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ dataDisks?: Array; /** * This only applies to Images that contain the Windows operating system, and @@ -390,29 +130,15 @@ export interface VirtualMachineConfigurationOutput { * */ licenseType?: string; - /** - * If specified, setup is performed on each Compute Node in the Pool to allow - * Tasks to run in containers. All regular Tasks and Job manager Tasks run on this - * Pool must specify the containerSettings property, and all other Tasks may - * specify it. - */ + /** The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. */ containerConfiguration?: ContainerConfigurationOutput; - /** - * If specified, encryption is performed on each node in the pool during node - * provisioning. - */ + /** The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. */ diskEncryptionConfiguration?: DiskEncryptionConfigurationOutput; - /** - * This configuration will specify rules on how nodes in the pool will be - * physically allocated. - */ + /** The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. */ nodePlacementConfiguration?: NodePlacementConfigurationOutput; - /** - * If specified, the extensions mentioned in this configuration will be installed - * on each node. - */ + /** The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. */ extensions?: Array; - /** Settings for the operating system disk of the compute node (VM). */ + /** Settings for the operating system disk of the Virtual Machine. 
*/ osDisk?: OSDiskOutput; } @@ -422,38 +148,23 @@ export interface VirtualMachineConfigurationOutput { * verified by Azure Batch, see the 'List Supported Images' operation. */ export interface ImageReferenceOutput { - /** For example, Canonical or MicrosoftWindowsServer. */ + /** The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. */ publisher?: string; - /** For example, UbuntuServer or WindowsServer. */ + /** The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. */ offer?: string; - /** For example, 18.04-LTS or 2019-Datacenter. */ + /** The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. */ sku?: string; - /** - * A value of 'latest' can be specified to select the latest version of an Image. - * If omitted, the default is 'latest'. - */ + /** The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. */ version?: string; - /** - * This property is mutually exclusive with other ImageReference properties. The - * Shared Image Gallery Image must have replicas in the same region and must be in - * the same subscription as the Azure Batch account. If the image version is not - * specified in the imageId, the latest version will be used. For information - * about the firewall settings for the Batch Compute Node agent to communicate - * with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - */ + /** The ARM resource identifier of the Shared Image Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Shared Image Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ virtualMachineImageId?: string; - /** - * The specific version of the platform image or marketplace image used to create - * the node. This read-only field differs from 'version' only if the value - * specified for 'version' when the pool was created was 'latest'. - */ + /** The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. */ readonly exactVersion?: string; } /** Windows operating system settings to apply to the virtual machine. */ export interface WindowsConfigurationOutput { - /** If omitted, the default value is true. */ + /** Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true. 
*/ enableAutomaticUpdates?: boolean; } @@ -463,16 +174,10 @@ export interface WindowsConfigurationOutput { * disks from within a VM to use them. */ export interface DataDiskOutput { - /** - * The lun is used to uniquely identify each data disk. If attaching multiple - * disks, each should have a distinct lun. The value must be between 0 and 63, - * inclusive. - */ + /** The logical unit number. The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive. */ lun: number; /** - * The default value for caching is readwrite. For information about the caching - * options see: - * https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + * The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. * * Possible values: none, readonly, readwrite */ @@ -480,7 +185,7 @@ export interface DataDiskOutput { /** The initial disk size in gigabytes. */ diskSizeGB: number; /** - * If omitted, the default is "standard_lrs". + * The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". * * Possible values: standard_lrs, premium_lrs */ @@ -492,19 +197,12 @@ export interface ContainerConfigurationOutput { /** * The container technology to be used. * - * Possible values: dockerCompatible + * Possible values: dockerCompatible, criCompatible */ type: string; - /** - * This is the full Image reference, as would be specified to "docker pull". An - * Image will be sourced from the default Docker registry unless the Image is - * fully qualified with an alternative registry. - */ + /** The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ containerImageNames?: string[]; - /** - * If any Images must be downloaded from a private registry which requires - * credentials, then those credentials must be provided here. - */ + /** Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. */ containerRegistries?: Array; } @@ -514,20 +212,17 @@ export interface ContainerRegistryOutput { username?: string; /** The password to log into the registry server. */ password?: string; - /** If omitted, the default is "docker.io". */ + /** The registry URL. If omitted, the default is "docker.io". */ registryServer?: string; - /** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ - identityReference?: ComputeNodeIdentityReferenceOutput; + /** The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. */ + identityReference?: BatchNodeIdentityReferenceOutput; } /** * The reference to a user assigned identity associated with the Batch pool which * a compute node will use. */ -export interface ComputeNodeIdentityReferenceOutput { +export interface BatchNodeIdentityReferenceOutput { /** The ARM resource id of the user assigned identity. 
 */
   resourceId?: string;
 }

@@ -538,11 +233,7 @@ export interface ComputeNodeIdentityReferenceOutput {
  * Image Gallery Image.
  */
 export interface DiskEncryptionConfigurationOutput {
-  /**
-   * If omitted, no disks on the compute nodes in the pool will be encrypted. On
-   * Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk"
-   * and "TemporaryDisk" must be specified.
-   */
+  /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */
   targets?: string[];
 }

@@ -553,8 +244,7 @@ export interface DiskEncryptionConfigurationOutput {
  */
 export interface NodePlacementConfigurationOutput {
   /**
-   * Allocation policy used by Batch Service to provision the nodes. If not
-   * specified, Batch will use the regional policy.
+   * Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy.
    *
    * Possible values: regional, zonal
    */
@@ -571,34 +261,21 @@ export interface VMExtensionOutput {
   type: string;
   /** The version of script handler. */
   typeHandlerVersion?: string;
-  /**
-   * Indicates whether the extension should use a newer minor version if one is
-   * available at deployment time. Once deployed, however, the extension will not
-   * upgrade minor versions unless redeployed, even with this property set to true.
-   */
+  /** Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. */
   autoUpgradeMinorVersion?: boolean;
+  /** Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available. */
+  enableAutomaticUpgrade?: boolean;
   /** JSON formatted public settings for the extension. */
-  settings?: ObjectOutput;
-  /**
-   * The extension can contain either protectedSettings or
-   * protectedSettingsFromKeyVault or no protected settings at all.
-   */
-  protectedSettings?: ObjectOutput;
-  /**
-   * Collection of extension names after which this extension needs to be
-   * provisioned.
-   */
+  settings?: Record<string, unknown>;
+  /** The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. */
+  protectedSettings?: Record<string, unknown>;
+  /** The collection of extension names. Collection of extension names after which this extension needs to be provisioned. */
   provisionAfterExtensions?: string[];
 }

-export interface ObjectOutput {}
-
 /** Settings for the operating system disk of the compute node (VM). */
 export interface OSDiskOutput {
-  /**
-   * Specifies the ephemeral Disk Settings for the operating system disk used by the
-   * compute node (VM).
-   */
+  /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */
   ephemeralOSDiskSettings?: DiffDiskSettingsOutput;
 }

@@ -608,98 +285,16 @@ export interface OSDiskOutput {
  */
 export interface DiffDiskSettingsOutput {
   /**
-   * This property can be used by user in the request to choose the location e.g.,
-   * cache disk space for Ephemeral OS disk provisioning.
For more information on - * Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size - * requirements for Windows VMs at - * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - * and Linux VMs at - * https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. * * Possible values: cachedisk */ placement?: string; } -/** An error that occurred when resizing a Pool. */ -export interface ResizeErrorOutput { - /** - * An identifier for the Pool resize error. Codes are invariant and are intended - * to be consumed programmatically. - */ - code?: string; - /** - * A message describing the Pool resize error, intended to be suitable for display - * in a user interface. - */ - message?: string; - /** A list of additional error details related to the Pool resize error. */ - values?: Array; -} - -/** Represents a name-value pair. */ -export interface NameValuePairOutput { - /** The name in the name-value pair. */ - name?: string; - /** The value in the name-value pair. */ - value?: string; -} - -/** The results and errors from an execution of a Pool autoscale formula. */ -export interface AutoScaleRunOutput { - /** The time at which the autoscale formula was last evaluated. */ - readonly timestamp: string; - /** - * Each variable value is returned in the form $variable=value, and variables are - * separated by semicolons. - */ - results?: string; - /** An error that occurred when executing or evaluating a Pool autoscale formula. */ - error?: AutoScaleRunErrorOutput; -} - -/** An error that occurred when executing or evaluating a Pool autoscale formula. */ -export interface AutoScaleRunErrorOutput { - /** - * An identifier for the autoscale error. Codes are invariant and are intended to - * be consumed programmatically. - */ - code?: string; - /** - * A message describing the autoscale error, intended to be suitable for display - * in a user interface. - */ - message?: string; - /** A list of additional error details related to the autoscale error. */ - values?: Array; -} - /** The network configuration for a Pool. */ export interface NetworkConfigurationOutput { - /** - * The virtual network must be in the same region and subscription as the Azure - * Batch Account. The specified subnet should have enough free IP addresses to - * accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have - * enough free IP addresses, the Pool will partially allocate Nodes and a resize - * error will occur. The 'MicrosoftAzureBatch' service principal must have the - * 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for - * the specified VNet. The specified subnet must allow communication from the - * Azure Batch service to be able to schedule Tasks on the Nodes. This can be - * verified by checking if the specified VNet has any associated Network Security - * Groups (NSG). 
If communication to the Nodes in the specified subnet is denied - * by an NSG, then the Batch service will set the state of the Compute Nodes to - * unusable. For Pools created with virtualMachineConfiguration only ARM virtual - * networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools - * created with cloudServiceConfiguration both ARM and classic virtual networks - * are supported. If the specified VNet has any associated Network Security Groups - * (NSG), then a few reserved system ports must be enabled for inbound - * communication. For Pools created with a virtual machine configuration, enable - * ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. - * For Pools created with a cloud service configuration, enable ports 10100, - * 20100, and 30100. Also enable outbound connections to Azure Storage on port - * 443. For more details see: - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - */ + /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For Pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId?: string; /** * The scope of dynamic vnet assignment. @@ -707,26 +302,17 @@ export interface NetworkConfigurationOutput { * Possible values: none, job */ dynamicVNetAssignmentScope?: string; - /** - * Pool endpoint configuration is only supported on Pools with the - * virtualMachineConfiguration property. - */ + /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. 
*/ endpointConfiguration?: PoolEndpointConfigurationOutput; - /** - * Public IP configuration property is only supported on Pools with the - * virtualMachineConfiguration property. - */ + /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */ publicIPAddressConfiguration?: PublicIPAddressConfigurationOutput; + /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */ + enableAcceleratedNetworking?: boolean; } /** The endpoint configuration for a Pool. */ export interface PoolEndpointConfigurationOutput { - /** - * The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum - * number of inbound NAT Pools is exceeded the request fails with HTTP status code - * 400. This cannot be specified if the IPAddressProvisioningType is - * NoPublicIPAddresses. - */ + /** A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. */ inboundNATPools: Array; } @@ -735,13 +321,7 @@ export interface PoolEndpointConfigurationOutput { * in a Batch Pool externally. */ export interface InboundNATPoolOutput { - /** - * The name must be unique within a Batch Pool, can contain letters, numbers, - * underscores, periods, and hyphens. Names must start with a letter or number, - * must end with a letter, number, or underscore, and cannot exceed 77 characters. - * If any invalid values are provided the request fails with HTTP status code - * 400. - */ + /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */ name: string; /** * The protocol of the endpoint. @@ -749,47 +329,19 @@ export interface InboundNATPoolOutput { * Possible values: tcp, udp */ protocol: string; - /** - * This must be unique within a Batch Pool. Acceptable values are between 1 and - * 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any - * reserved values are provided the request fails with HTTP status code 400. - */ + /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */ backendPort: number; - /** - * Acceptable values range between 1 and 65534 except ports from 50000 to 55000 - * which are reserved. All ranges within a Pool must be distinct and cannot - * overlap. Each range must contain at least 40 ports. If any reserved or - * overlapping values are provided the request fails with HTTP status code 400. - */ + /** The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. 
Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */ frontendPortRangeStart: number; - /** - * Acceptable values range between 1 and 65534 except ports from 50000 to 55000 - * which are reserved by the Batch service. All ranges within a Pool must be - * distinct and cannot overlap. Each range must contain at least 40 ports. If any - * reserved or overlapping values are provided the request fails with HTTP status - * code 400. - */ + /** The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */ frontendPortRangeEnd: number; - /** - * The maximum number of rules that can be specified across all the endpoints on a - * Batch Pool is 25. If no network security group rules are specified, a default - * rule will be created to allow inbound access to the specified backendPort. If - * the maximum number of network security group rules is exceeded the request - * fails with HTTP status code 400. - */ + /** A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. */ networkSecurityGroupRules?: Array; } /** A network security group rule to apply to an inbound endpoint. */ export interface NetworkSecurityGroupRuleOutput { - /** - * Priorities within a Pool must be unique and are evaluated in order of priority. - * The lower the number the higher the priority. For example, rules could be - * specified with order numbers of 150, 250, and 350. The rule with the order - * number of 150 takes precedence over the rule that has an order of 250. Allowed - * priorities are 150 to 4096. If any reserved or duplicate values are provided - * the request fails with HTTP status code 400. - */ + /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */ priority: number; /** * The action that should be taken for a specified IP address, subnet range or tag. @@ -797,37 +349,21 @@ export interface NetworkSecurityGroupRuleOutput { * Possible values: allow, deny */ access: string; - /** - * Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. - * 192.168.1.0/24), default tag, or * (for all addresses). If any other values - * are provided the request fails with HTTP status code 400. 
- */ + /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */ sourceAddressPrefix: string; - /** - * Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a - * port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each - * entry in this collection must not overlap any other entry (either a range or an - * individual port). If any other values are provided the request fails with HTTP - * status code 400. The default value is '*'. - */ + /** The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'. */ sourcePortRanges?: string[]; } /** The public IP Address configuration of the networking configuration of a Pool. */ export interface PublicIPAddressConfigurationOutput { /** - * The default value is BatchManaged. + * The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. * * Possible values: batchmanaged, usermanaged, nopublicipaddresses */ provision?: string; - /** - * The number of IPs specified here limits the maximum size of the Pool - 100 - * dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public - * IP. For example, a pool needing 250 dedicated VMs would need at least 3 public - * IPs specified. Each element of this collection is of the form: - * /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - */ + /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ ipAddressIds?: string[]; } @@ -847,75 +383,32 @@ export interface PublicIPAddressConfigurationOutput { * block Batch from being able to re-run the StartTask. */ export interface StartTaskOutput { - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ + /** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettingsOutput; - /** Files listed under this element are located in the Task's working directory. */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. */ resourceFiles?: Array; /** A list of environment variable settings for the StartTask. */ environmentSettings?: Array; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ + /** The user identity under which the StartTask runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ userIdentity?: UserIdentityOutput; - /** - * The Batch service retries a Task if its exit code is nonzero. Note that this - * value specifically controls the number of retries. The Batch service will try - * the Task once, and may then retry up to this limit. For example, if the maximum - * retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 - * retries). If the maximum retry count is 0, the Batch service does not retry the - * Task. If the maximum retry count is -1, the Batch service retries the Task - * without limit, however this is not recommended for a start task or any task. - * The default value is 0 (no retries) - */ + /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). 
If the maximum retry count is 0, the Batch service does not retry the Task. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). */ maxTaskRetryCount?: number; - /** - * If true and the StartTask fails on a Node, the Batch service retries the - * StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has - * still not completed successfully after all retries, then the Batch service - * marks the Node unusable, and will not schedule Tasks to it. This condition can - * be detected via the Compute Node state and failure info details. If false, the - * Batch service will not wait for the StartTask to complete. In this case, other - * Tasks can start executing on the Compute Node while the StartTask is still - * running; and even if the StartTask fails, new Tasks will continue to be - * scheduled on the Compute Node. The default is true. - */ + /** Whether the Batch service should wait for the StartTask to complete successfully (that is, to exit with exit code 0) before scheduling any Tasks on the Compute Node. If true and the StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks to it. This condition can be detected via the Compute Node state and failure info details. If false, the Batch service will not wait for the StartTask to complete. In this case, other Tasks can start executing on the Compute Node while the StartTask is still running; and even if the StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is true. */ waitForSuccess?: boolean; } /** The container settings for a Task. */ export interface TaskContainerSettingsOutput { - /** - * These additional options are supplied as arguments to the "docker create" - * command, in addition to those controlled by the Batch Service. - */ + /** Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. */ containerRunOptions?: string; - /** - * This is the full Image reference, as would be specified to "docker pull". If - * no tag is provided as part of the Image name, the tag ":latest" is used as a - * default. - */ + /** The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default. */ imageName: string; - /** This setting can be omitted if was already provided at Pool creation. */ + /** The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. */ registry?: ContainerRegistryOutput; /** - * The default is 'taskWorkingDirectory'. + * The location of the container Task working directory. The default is 'taskWorkingDirectory'. * * Possible values: taskWorkingDirectory, containerImageDefault */ @@ -924,59 +417,20 @@ export interface TaskContainerSettingsOutput { /** A single file or multiple files to be downloaded to a Compute Node. 
*/ export interface ResourceFileOutput { - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. - */ + /** The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. */ autoStorageContainerName?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. This URL must be readable - * and listable from compute nodes. There are three ways to get such a URL for a - * container in Azure storage: include a Shared Access Signature (SAS) granting - * read and list permissions on the container, use a managed identity with read - * and list permissions, or set the ACL for the container to allow public access. - */ + /** The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. */ storageContainerUrl?: string; - /** - * The autoStorageContainerName, storageContainerUrl and httpUrl properties are - * mutually exclusive and one of them must be specified. If the URL points to - * Azure Blob Storage, it must be readable from compute nodes. There are three - * ways to get such a URL for a blob in Azure storage: include a Shared Access - * Signature (SAS) granting read permissions on the blob, use a managed identity - * with read permission, or set the ACL for the blob or its container to allow - * public access. - */ + /** The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. */ httpUrl?: string; - /** - * The property is valid only when autoStorageContainerName or storageContainerUrl - * is used. This prefix can be a partial filename or a subdirectory. If a prefix - * is not specified, all the files in the container will be downloaded. - */ + /** The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. */ blobPrefix?: string; - /** - * If the httpUrl property is specified, the filePath is required and describes - * the path which the file will be downloaded to, including the filename. - * Otherwise, if the autoStorageContainerName or storageContainerUrl property is - * specified, filePath is optional and is the directory to download the files to. 
- * In the case where filePath is used as a directory, any directory structure - * already associated with the input data will be retained in full and appended to - * the specified filePath directory. The specified relative path cannot break out - * of the Task's working directory (for example by using '..'). - */ + /** The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..'). */ filePath?: string; - /** - * This property applies only to files being downloaded to Linux Compute Nodes. It - * will be ignored if it is specified for a resourceFile which will be downloaded - * to a Windows Compute Node. If this property is not specified for a Linux - * Compute Node, then a default value of 0770 is applied to the file. - */ + /** The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file. */ fileMode?: string; - /** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ - identityReference?: ComputeNodeIdentityReferenceOutput; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by storageContainerUrl or httpUrl. */ + identityReference?: BatchNodeIdentityReferenceOutput; } /** An environment variable to be set on a Task process. */ @@ -987,100 +441,60 @@ export interface EnvironmentSettingOutput { value?: string; } -/** Specify either the userName or autoUser property, but not both. */ +/** The definition of the user identity under which the Task is run. Specify either the userName or autoUser property, but not both. */ export interface UserIdentityOutput { - /** - * The userName and autoUser properties are mutually exclusive; you must specify - * one but not both. - */ + /** The name of the user identity under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. */ username?: string; - /** - * The userName and autoUser properties are mutually exclusive; you must specify - * one but not both. - */ + /** The auto user under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. */ autoUser?: AutoUserSpecificationOutput; } -/** - * Specifies the parameters for the auto user that runs a Task on the Batch - * service. - */ +/** Specifies the options for the auto user that runs an Azure Batch Task. */ export interface AutoUserSpecificationOutput { /** - * The default value is pool. If the pool is running Windows a value of Task - * should be specified if stricter isolation between tasks is required. 
For - * example, if the task mutates the registry in a way which could impact other - * tasks, or if certificates have been specified on the pool which should not be - * accessible by normal tasks but should be accessible by StartTasks. + * The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. * * Possible values: task, pool */ scope?: string; /** - * The default value is nonAdmin. + * The elevation level of the auto user. The default value is nonAdmin. * * Possible values: nonadmin, admin */ elevationLevel?: string; } -/** A reference to a Certificate to be installed on Compute Nodes in a Pool. */ +/** A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ export interface CertificateReferenceOutput { /** The thumbprint of the Certificate. */ thumbprint: string; /** The algorithm with which the thumbprint is associated. This must be sha1. */ thumbprintAlgorithm: string; /** - * The default value is currentuser. This property is applicable only for Pools - * configured with Windows Compute Nodes (that is, created with - * cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows - * Image reference). For Linux Compute Nodes, the Certificates are stored in a - * directory inside the Task working directory and an environment variable - * AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - * For Certificates with visibility of 'remoteUser', a 'certs' directory is - * created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. + * The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. * * Possible values: currentuser, localmachine */ storeLocation?: string; - /** - * This property is applicable only for Pools configured with Windows Compute - * Nodes (that is, created with cloudServiceConfiguration, or with - * virtualMachineConfiguration using a Windows Image reference). Common store - * names include: My, Root, CA, Trust, Disallowed, TrustedPeople, - * TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be - * used. The default value is My. - */ + /** The name of the Certificate store on the Compute Node into which to install the Certificate. 
This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. */ storeName?: string; - /** - * You can specify more than one visibility in this collection. The default is all - * Accounts. - */ + /** Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts. */ visibility?: string[]; } /** A reference to an Package to be deployed to Compute Nodes. */ export interface ApplicationPackageReferenceOutput { - /** - * When creating a pool, the package's application ID must be fully qualified - * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - */ + /** The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). */ applicationId: string; - /** - * If this is omitted on a Pool, and no default version is specified for this - * application, the request fails with the error code - * InvalidApplicationPackageReferences and HTTP status code 409. If this is - * omitted on a Task, and no default version is specified for this application, - * the Task fails with a pre-processing error. - */ + /** The version of the application to deploy. If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error. */ version?: string; } /** Specifies how Tasks should be distributed across Compute Nodes. */ export interface TaskSchedulingPolicyOutput { /** - * If not specified, the default is spread. + * How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. * * Possible values: spread, pack */ @@ -1092,60 +506,36 @@ export interface TaskSchedulingPolicyOutput { * Compute Node. */ export interface UserAccountOutput { - /** - * The name of the user Account. Names can contain any Unicode characters up to a - * maximum length of 20. - */ + /** The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. */ name: string; /** The password for the user Account. */ password: string; /** - * The default value is nonAdmin. + * The elevation level of the user Account. The default value is nonAdmin. * * Possible values: nonadmin, admin */ elevationLevel?: string; - /** - * This property is ignored if specified on a Windows Pool. If not specified, the - * user is created with the default options. - */ + /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. 
*/ linuxUserConfiguration?: LinuxUserConfigurationOutput; - /** - * This property can only be specified if the user is on a Windows Pool. If not - * specified and on a Windows Pool, the user is created with the default options. - */ + /** The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options. */ windowsUserConfiguration?: WindowsUserConfigurationOutput; } /** Properties used to create a user Account on a Linux Compute Node. */ export interface LinuxUserConfigurationOutput { - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the uid. - */ + /** The user ID of the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid. */ uid?: number; - /** - * The uid and gid properties must be specified together or not at all. If not - * specified the underlying operating system picks the gid. - */ + /** The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid. */ gid?: number; - /** - * The private key must not be password protected. The private key is used to - * automatically configure asymmetric-key based authentication for SSH between - * Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication - * property is true (it is ignored if enableInterNodeCommunication is false). It - * does this by placing the key pair into the user's .ssh directory. If not - * specified, password-less SSH is not configured between Compute Nodes (no - * modification of the user's .ssh directory is done). - */ + /** The SSH private key for the user Account. The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done). */ sshPrivateKey?: string; } /** Properties used to create a user Account on a Windows Compute Node. */ export interface WindowsUserConfigurationOutput { /** - * The default value for VirtualMachineConfiguration Pools is 'batch' and for - * CloudServiceConfiguration Pools is 'interactive'. + * The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. * * Possible values: batch, interactive */ @@ -1165,13 +555,13 @@ export interface MetadataItemOutput { /** The file system to mount on each node. */ export interface MountConfigurationOutput { - /** This property is mutually exclusive with all other properties. */ + /** The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties. */ azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfigurationOutput; - /** This property is mutually exclusive with all other properties. */ + /** The NFS file system to mount on each node. This property is mutually exclusive with all other properties. 
*/ nfsMountConfiguration?: NFSMountConfigurationOutput; - /** This property is mutually exclusive with all other properties. */ + /** The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all other properties. */ cifsMountConfiguration?: CifsMountConfigurationOutput; - /** This property is mutually exclusive with all other properties. */ + /** The Azure File Share to mount on each node. This property is mutually exclusive with all other properties. */ azureFileShareConfiguration?: AzureFileShareConfigurationOutput; } @@ -1181,40 +571,25 @@ export interface AzureBlobFileSystemConfigurationOutput { accountName: string; /** The Azure Blob Storage Container name. */ containerName: string; - /** - * This property is mutually exclusive with both sasKey and identity; exactly one - * must be specified. - */ + /** The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified. */ accountKey?: string; - /** - * This property is mutually exclusive with both accountKey and identity; exactly - * one must be specified. - */ + /** The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified. */ sasKey?: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ blobfuseOptions?: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ relativeMountPath: string; - /** - * This property is mutually exclusive with both accountKey and sasKey; exactly - * one must be specified. - */ - identityReference?: ComputeNodeIdentityReferenceOutput; + /** The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified. */ + identityReference?: BatchNodeIdentityReferenceOutput; } /** Information used to connect to an NFS file system. */ export interface NFSMountConfigurationOutput { /** The URI of the file system to mount. */ source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ mountOptions?: string; } @@ -1224,12 +599,9 @@ export interface CifsMountConfigurationOutput { username: string; /** The URI of the file system to mount. */ source: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. 
All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ mountOptions?: string; /** The password to use for authentication against the CIFS file system. */ password: string; @@ -1239,40 +611,232 @@ export interface CifsMountConfigurationOutput { export interface AzureFileShareConfigurationOutput { /** The Azure Storage account name. */ accountName: string; - /** This is of the form 'https://{account}.file.core.windows.net/'. */ + /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */ azureFileUrl: string; /** The Azure Storage account key. */ accountKey: string; - /** - * All file systems are mounted relative to the Batch mounts directory, accessible - * via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - */ + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ relativeMountPath: string; - /** These are 'net use' options in Windows and 'mount' options in Linux. */ + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ mountOptions?: string; } +/** The result of listing the Pools in an Account. */ +export interface BatchPoolListResultOutput { + /** The list of Pools. */ + value?: Array; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** A Pool in the Azure Batch service. */ +export interface BatchPoolOutput { + /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ + readonly id?: string; + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + readonly displayName?: string; + /** The URL of the Pool. */ + readonly url?: string; + /** The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. */ + readonly eTag?: string; + /** The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. */ + readonly lastModified?: string; + /** The creation time of the Pool. */ + readonly creationTime?: string; + /** + * The current state of the Pool. + * + * Possible values: active, deleting + */ + readonly state?: string; + /** The time at which the Pool entered its current state. */ + readonly stateTransitionTime?: string; + /** + * Whether the Pool is resizing. 
+ * + * Possible values: steady, resizing, stopping + */ + readonly allocationState?: string; + /** The time at which the Pool entered its current allocation state. */ + readonly allocationStateTransitionTime?: string; + /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + readonly vmSize?: string; + /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ + readonly cloudServiceConfiguration?: CloudServiceConfigurationOutput; + /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. */ + readonly virtualMachineConfiguration?: VirtualMachineConfigurationOutput; + /** The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. */ + readonly resizeTimeout?: string; + /** A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. */ + readonly resizeErrors?: Array; + /** The number of dedicated Compute Nodes currently in the Pool. */ + readonly currentDedicatedNodes?: number; + /** The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. */ + readonly currentLowPriorityNodes?: number; + /** The desired number of dedicated Compute Nodes in the Pool. */ + readonly targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ + readonly targetLowPriorityNodes?: number; + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */ + readonly enableAutoScale?: boolean; + /** A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleEvaluationInterval?: string; + /** The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleRun?: AutoScaleRunOutput; + /** Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. 
*/ + readonly enableInterNodeCommunication?: boolean; + /** The network configuration for the Pool. */ + readonly networkConfiguration?: NetworkConfigurationOutput; + /** A Task specified to run on each Compute Node as it joins the Pool. */ + startTask?: StartTaskOutput; + /** + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + readonly certificateReferences?: Array; + /** The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ + readonly applicationPackageReferences?: Array; + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. */ + readonly applicationLicenses?: string[]; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ + readonly taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + readonly taskSchedulingPolicy?: TaskSchedulingPolicyOutput; + /** The list of user Accounts to be created on each Compute Node in the Pool. */ + readonly userAccounts?: Array; + /** A list of name-value pairs associated with the Pool as metadata. */ + readonly metadata?: Array; + /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + readonly stats?: PoolStatisticsOutput; + /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ + readonly mountConfiguration?: Array; + /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + readonly identity?: BatchPoolIdentityOutput; + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. 
+ * + * Possible values: default, classic, simplified + */ + targetNodeCommunicationMode?: string; + /** + * The current state of the pool communication mode. + * + * Possible values: default, classic, simplified + */ + readonly currentNodeCommunicationMode?: string; +} + +/** An error that occurred when resizing a Pool. */ +export interface ResizeErrorOutput { + /** An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Pool resize error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the Pool resize error. */ + values?: Array; +} + +/** Represents a name-value pair. */ +export interface NameValuePairOutput { + /** The name in the name-value pair. */ + name?: string; + /** The value in the name-value pair. */ + value?: string; +} + +/** The results and errors from an execution of a Pool autoscale formula. */ +export interface AutoScaleRunOutput { + /** The time at which the autoscale formula was last evaluated. */ + timestamp: string; + /** The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons. */ + results?: string; + /** Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful. */ + error?: AutoScaleRunErrorOutput; +} + +/** An error that occurred when executing or evaluating a Pool autoscale formula. */ +export interface AutoScaleRunErrorOutput { + /** An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the autoscale error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the autoscale error. */ + values?: Array; +} + +/** Contains utilization and resource usage statistics for the lifetime of a Pool. */ +export interface PoolStatisticsOutput { + /** The URL for the statistics. */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: string; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: string; + /** Statistics related to Pool usage, such as the amount of core-time used. */ + usageStats?: UsageStatisticsOutput; + /** Statistics related to resource consumption by Compute Nodes in the Pool. */ + resourceStats?: ResourceStatisticsOutput; +} + +/** Statistics related to Pool usage information. */ +export interface UsageStatisticsOutput { + /** The start time of the time range covered by the statistics. */ + startTime: string; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: string; + /** The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. */ + dedicatedCoreTime: string; +} + +/** Statistics related to resource consumption by Compute Nodes in a Pool. */ +export interface ResourceStatisticsOutput { + /** The start time of the time range covered by the statistics. */ + startTime: string; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. 
*/ + lastUpdateTime: string; + /** The average CPU usage across all Compute Nodes in the Pool (percentage per node). */ + avgCPUPercentage: number; + /** The average memory usage in GiB across all Compute Nodes in the Pool. */ + avgMemoryGiB: number; + /** The peak memory usage in GiB across all Compute Nodes in the Pool. */ + peakMemoryGiB: number; + /** The average used disk space in GiB across all Compute Nodes in the Pool. */ + avgDiskGiB: number; + /** The peak used disk space in GiB across all Compute Nodes in the Pool. */ + peakDiskGiB: number; + /** The total number of disk read operations across all Compute Nodes in the Pool. */ + diskReadIOps: number; + /** The total number of disk write operations across all Compute Nodes in the Pool. */ + diskWriteIOps: number; + /** The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. */ + diskReadGiB: number; + /** The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. */ + diskWriteGiB: number; + /** The total amount of data in GiB of network reads across all Compute Nodes in the Pool. */ + networkReadGiB: number; + /** The total amount of data in GiB of network writes across all Compute Nodes in the Pool. */ + networkWriteGiB: number; +} + /** The identity of the Batch pool, if configured. */ export interface BatchPoolIdentityOutput { /** - * The list of user identities associated with the Batch pool. The user identity - * dictionary key references will be ARM resource ids in the form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + * The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. * * Possible values: UserAssigned, None */ type: string; - /** - * The user identity dictionary key references will be ARM resource ids in the - * form: - * '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - */ + /** The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ userAssignedIdentities?: Array; } /** The user assigned Identity */ export interface UserAssignedIdentityOutput { - /** The ARM resource id of the user assigned identity */ + /** The ARM resource id of the user assigned identity. */ resourceId: string; /** The client id of the user assigned identity. */ readonly clientId?: string; @@ -1280,12 +844,10 @@ export interface UserAssignedIdentityOutput { readonly principalId?: string; } -/** The result of listing the Pools in an Account. */ -export interface BatchPoolListResultOutput { - /** The list of Pools. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; +/** Options for evaluating an automatic scaling formula on an Azure Batch Pool. */ +export interface BatchPoolEvaluateAutoScaleOptionsOutput { + /** The formula for the desired number of Compute Nodes in the Pool. 
The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + autoScaleFormula: string; } /** The result of listing the supported Virtual Machine Images. */ @@ -1302,12 +864,8 @@ export interface AccountListSupportedImagesResultOutput { */ export interface ImageInformationOutput { /** The ID of the Compute Node agent SKU which the Image supports. */ - readonly nodeAgentSKUId: string; - /** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ + nodeAgentSKUId: string; + /** The reference to the Azure Virtual Machine's Marketplace Image. */ imageReference: ImageReferenceOutput; /** * The type of operating system (e.g. Windows or Linux) of the Image. @@ -1315,20 +873,12 @@ export interface ImageInformationOutput { * Possible values: linux, windows */ osType: string; - /** - * Not every capability of the Image is listed. Capabilities in this list are - * considered of special interest and are generally related to integration with - * other features in the Azure Batch service. - */ + /** The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. */ capabilities?: string[]; - /** - * The time when the Azure Batch service will stop accepting create Pool requests - * for the Image. - */ + /** The time when the Azure Batch service will stop accepting create Pool requests for the Image. */ batchSupportEndOfLife?: string; /** - * Whether the Azure Batch service actively verifies that the Image is compatible - * with the associated Compute Node agent SKU. + * Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. * * Possible values: verified, unverified */ @@ -1346,10 +896,10 @@ export interface PoolNodeCountsListResultOutput { /** The number of Compute Nodes in each state for a Pool. */ export interface PoolNodeCountsOutput { /** The ID of the Pool. */ - readonly poolId: string; - /** The number of Compute Nodes in each Compute Node state. */ + poolId: string; + /** The number of dedicated Compute Nodes in each state. */ dedicated?: NodeCountsOutput; - /** The number of Compute Nodes in each Compute Node state. */ + /** The number of Spot/Low-priority Compute Nodes in each state. */ lowPriority?: NodeCountsOutput; } @@ -1385,97 +935,24 @@ export interface NodeCountsOutput { total: number; } -/** Resource usage statistics for a Job. */ -export interface JobStatisticsOutput { - /** The URL of the statistics. */ - readonly url: string; - /** The start time of the time range covered by the statistics. */ - startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ - lastUpdateTime: string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in the Job. 
- */ - userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in the Job. - */ - kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on - * a Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If a Task was retried, this - * includes the wall clock time of all the Task retries. - */ - wallClockTime: string; - /** The total number of disk read operations made by all Tasks in the Job. */ - readIOps: number; - /** The total number of disk write operations made by all Tasks in the Job. */ - writeIOps: number; - /** The total amount of data in GiB read from disk by all Tasks in the Job. */ - readIOGiB: number; - /** The total amount of data in GiB written to disk by all Tasks in the Job. */ - writeIOGiB: number; - /** A Task completes successfully if it returns exit code 0. */ - numSucceededTasks: number; - /** - * A Task fails if it exhausts its maximum retry count without returning exit code - * 0. - */ - numFailedTasks: number; - /** - * The total number of retries on all the Tasks in the Job during the given time - * range. - */ - numTaskRetries: number; - /** - * The wait time for a Task is defined as the elapsed time between the creation of - * the Task and the start of Task execution. (If the Task is retried due to - * failures, the wait time is the time to the most recent Task execution.) This - * value is only reported in the Account lifetime statistics; it is not included - * in the Job statistics. - */ - waitTime: string; -} - /** An Azure Batch Job. */ export interface BatchJobOutput { - /** - * The ID is case-preserving and case-insensitive (that is, you may not have two - * IDs within an Account that differ only by case). - */ - id?: string; + /** A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ + readonly id?: string; /** The display name for the Job. */ - displayName?: string; - /** - * Whether Tasks in the Job can define dependencies on each other. The default is - * false. - */ - usesTaskDependencies?: boolean; + readonly displayName?: string; + /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ + readonly usesTaskDependencies?: boolean; /** The URL of the Job. */ readonly url?: string; - /** - * This is an opaque string. You can use it to detect whether the Job has changed - * between requests. In particular, you can be pass the ETag when updating a Job - * to specify that your changes should take effect only if nobody else has - * modified the Job in the meantime. - */ + /** The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */ readonly eTag?: string; - /** - * This is the last time at which the Job level data, such as the Job state or - * priority, changed. It does not factor in task-level changes such as adding new - * Tasks or Tasks changing state. - */ + /** The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. 
It does not factor in task-level changes such as adding new Tasks or Tasks changing state. */ readonly lastModified?: string; /** The creation time of the Job. */ readonly creationTime?: string; /** - * The state of the Job. + * The current state of the Job. * * Possible values: active, disabling, disabled, enabling, terminating, completed, deleting */ @@ -1483,131 +960,63 @@ export interface BatchJobOutput { /** The time at which the Job entered its current state. */ readonly stateTransitionTime?: string; /** - * This property is not set if the Job is in its initial Active state. + * The previous state of the Job. This property is not set if the Job is in its initial Active state. * * Possible values: active, disabling, disabled, enabling, terminating, completed, deleting */ readonly previousState?: string; - /** This property is not set if the Job is in its initial Active state. */ + /** The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. */ readonly previousStateTransitionTime?: string; - /** - * Priority values can range from -1000 to 1000, with -1000 being the lowest - * priority and 1000 being the highest priority. The default value is 0. - */ + /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */ priority?: number; - /** - * If the value is set to True, other high priority jobs submitted to the system - * will take precedence and will be able requeue tasks from this job. You can - * update a job's allowTaskPreemption after it has been created using the update - * job API. - */ + /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ allowTaskPreemption?: boolean; - /** - * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not - * specified, the default value is -1, which means there's no limit to the number - * of tasks that can be run at once. You can update a job's maxParallelTasks after - * it has been created using the update job API. - */ + /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ maxParallelTasks?: number; - /** The execution constraints for a Job. */ + /** The execution constraints for the Job. */ constraints?: JobConstraintsOutput; + /** Details of a Job Manager Task to be launched when the Job is started. */ + readonly jobManagerTask?: JobManagerTaskOutput; + /** The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. */ + readonly jobPreparationTask?: JobPreparationTaskOutput; + /** The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. */ + readonly jobReleaseTask?: JobReleaseTaskOutput; + /** The list of common environment variable settings. 
These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ + readonly commonEnvironmentSettings?: Array; + /** The Pool settings associated with the Job. */ + poolInfo: PoolInformationOutput; /** - * The Job Manager Task is automatically started when the Job is created. The - * Batch service tries to schedule the Job Manager Task before any other Tasks in - * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where - * Job Manager Tasks are running for as long as possible (that is, Compute Nodes - * running 'normal' Tasks are removed before Compute Nodes running Job Manager - * Tasks). When a Job Manager Task fails and needs to be restarted, the system - * tries to schedule it at the highest priority. If there are no idle Compute - * Nodes available, the system may terminate one of the running Tasks in the Pool - * and return it to the queue in order to make room for the Job Manager Task to - * restart. Note that a Job Manager Task in one Job does not have priority over - * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For - * example, if a Job Manager in a priority 0 Job needs to be restarted, it will - * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery - * operation is triggered on a Node. Examples of recovery operations include (but - * are not limited to) when an unhealthy Node is rebooted or a Compute Node - * disappeared due to host failure. Retries due to recovery operations are - * independent of and are not counted against the maxTaskRetryCount. Even if the - * maxTaskRetryCount is 0, an internal retry due to a recovery operation may - * occur. Because of this, all Tasks should be idempotent. This means Tasks need - * to tolerate being interrupted and restarted without causing any corruption or - * duplicate data. The best practice for long running Tasks is to use some form of - * checkpointing. - */ - jobManagerTask?: JobManagerTaskOutput; - /** - * The Job Preparation Task is a special Task run on each Compute Node before any - * other Task of the Job. - */ - jobPreparationTask?: JobPreparationTaskOutput; - /** - * The Job Release Task is a special Task run at the end of the Job on each - * Compute Node that has run any other Task of the Job. - */ - jobReleaseTask?: JobReleaseTaskOutput; - /** - * Individual Tasks can override an environment setting specified here by - * specifying the same setting name with a different value. - */ - commonEnvironmentSettings?: Array; - /** Specifies how a Job should be assigned to a Pool. */ - poolInfo?: PoolInformationOutput; - /** - * The default is noaction. + * The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. * * Possible values: noaction, terminatejob */ onAllTasksComplete?: string; /** - * A Task is considered to have failed if has a failureInfo. A failureInfo is set - * if the Task completes with a non-zero exit code after exhausting its retry - * count, or if there was an error starting the Task, for example due to a - * resource file download error. The default is noaction. + * The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. * * Possible values: noaction, performexitoptionsjobaction */ - onTaskFailure?: string; + readonly onTaskFailure?: string; /** The network configuration for the Job. */ - networkConfiguration?: JobNetworkConfigurationOutput; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ + readonly networkConfiguration?: JobNetworkConfigurationOutput; + /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; - /** Contains information about the execution of a Job in the Azure Batch service. */ + /** The execution information for the Job. */ readonly executionInfo?: JobExecutionInformationOutput; - /** - * This property is populated only if the CloudJob was retrieved with an expand - * clause including the 'stats' attribute; otherwise it is null. The statistics - * may not be immediately available. The Batch service performs periodic roll-up - * of statistics. The typical delay is about 30 minutes. - */ + /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ readonly stats?: JobStatisticsOutput; } /** The execution constraints for a Job. */ export interface JobConstraintsOutput { - /** - * If the Job does not complete within the time limit, the Batch service - * terminates it and any Tasks that are still running. In this case, the - * termination reason will be MaxWallClockTimeExpiry. If this property is not - * specified, there is no time limit on how long the Job may run. - */ + /** The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run. */ maxWallClockTime?: string; - /** - * Note that this value specifically controls the number of retries. The Batch - * service will try each Task once, and may then retry up to this limit. For - * example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one - * initial try and 3 retries). If the maximum retry count is 0, the Batch service - * does not retry Tasks. If the maximum retry count is -1, the Batch service - * retries the Task without limit, however this is not recommended for a start - * task or any task. The default value is 0 (no retries) - */ + /** The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. 
If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries). */ maxTaskRetryCount?: number; } /** + * Specifies details of a Job Manager Task. * The Job Manager Task is automatically started when the Job is created. The * Batch service tries to schedule the Job Manager Task before any other Tasks in * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where @@ -1632,191 +1041,90 @@ export interface JobConstraintsOutput { * checkpointing. */ export interface JobManagerTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. - */ + /** A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. */ id: string; - /** - * It need not be unique and can contain any Unicode characters up to a maximum - * length of 1024. - */ + /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; - /** - * If the Pool that will run this Task has containerConfiguration set, this must - * be set as well. If the Pool that will run this Task doesn't have - * containerConfiguration set, this must not be set. When this is specified, all - * directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure - * Batch directories on the node) are mapped into the container, all Task - * environment variables are mapped into the container, and the Task command line - * is executed in the container. Files produced in the container outside of - * AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that - * Batch file APIs will not be able to access those files. - */ + /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. 
When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettingsOutput; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ resourceFiles?: Array; - /** - * For multi-instance Tasks, the files will only be uploaded from the Compute Node - * on which the primary Task is executed. - */ + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ outputFiles?: Array; /** A list of environment variable settings for the Job Manager Task. */ environmentSettings?: Array; - /** Execution constraints to apply to a Task. */ + /** Constraints that apply to the Job Manager Task. */ constraints?: TaskConstraintsOutput; - /** - * The default is 1. A Task can only be scheduled to run on a compute node if the - * node has enough free scheduling slots available. For multi-instance Tasks, this - * property is not supported and must not be specified. - */ + /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified. */ requiredSlots?: number; - /** - * If true, when the Job Manager Task completes, the Batch service marks the Job - * as complete. If any Tasks are still running at this time (other than Job - * Release), those Tasks are terminated. If false, the completion of the Job - * Manager Task does not affect the Job status. In this case, you should either - * use the onAllTasksComplete attribute to terminate the Job, or have a client or - * user terminate the Job explicitly. An example of this is if the Job Manager - * creates a set of Tasks but then takes no further role in their execution. The - * default value is true. If you are using the onAllTasksComplete and - * onTaskFailure attributes to control Job lifetime, and using the Job Manager - * Task only to create the Tasks for the Job (not to monitor progress), then it is - * important to set killJobOnCompletion to false. 
- */ + /** Whether completion of the Job Manager Task signifies completion of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false. */ killJobOnCompletion?: boolean; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ + /** The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ userIdentity?: UserIdentityOutput; - /** - * If true, no other Tasks will run on the same Node for as long as the Job - * Manager is running. If false, other Tasks can run simultaneously with the Job - * Manager on a Compute Node. The Job Manager Task counts normally against the - * Compute Node's concurrent Task limit, so this is only relevant if the Compute - * Node allows multiple concurrent Tasks. The default value is true. - */ + /** Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true. */ runExclusive?: boolean; /** - * Application Packages are downloaded and deployed to a shared directory, not the - * Task working directory. Therefore, if a referenced Application Package is - * already on the Compute Node, and is up to date, then it is not re-downloaded; + * A list of Application Packages that the Batch service will deploy to the + * Compute Node before running the command line.Application Packages are + * downloaded and deployed to a shared directory, not the Task working + * directory. Therefore, if a referenced Application Package is already + * on the Compute Node, and is up to date, then it is not re-downloaded; * the existing copy on the Compute Node is used. If a referenced Application * Package cannot be installed, for example because the package has been deleted * or because download failed, the Task fails. */ applicationPackageReferences?: Array; - /** - * If this property is set, the Batch service provides the Task with an - * authentication token which can be used to authenticate Batch service operations - * without requiring an Account access key. The token is provided via the - * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the - * Task can carry out using the token depend on the settings. For example, a Task - * can request Job permissions in order to add other Tasks to the Job, or check - * the status of the Job or of other Tasks under the Job. 
- */ + /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ authenticationTokenSettings?: AuthenticationTokenSettingsOutput; - /** The default value is true. */ + /** Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true. */ allowLowPriorityNode?: boolean; } -/** - * On every file uploads, Batch service writes two log files to the compute node, - * 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn - * more about a specific failure. - */ +/** On every file uploads, Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure. */ export interface OutputFileOutput { - /** - * Both relative and absolute paths are supported. Relative paths are relative to - * the Task working directory. The following wildcards are supported: * matches 0 - * or more characters (for example pattern abc* would match abc or abcdef), ** - * matches any directory, ? matches any single character, [abc] matches one - * character in the brackets, and [a-c] matches one character in the range. - * Brackets can include a negation to match any character not specified (for - * example [!abc] matches any character but a, b, or c). If a file name starts - * with "." it is ignored by default but may be matched by specifying it - * explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple - * example: **\*.txt matches any file that does not start in '.' and ends with - * .txt in the Task working directory or any subdirectory. If the filename - * contains a wildcard character it can be escaped using brackets (for example - * abc[*] would match a file named abc*). Note that both \ and / are treated as - * directory separators on Windows, but only / is on Linux. Environment variables - * (%var% on Windows or $var on Linux) are expanded prior to the pattern being - * applied. - */ + /** A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. */ filePattern: string; - /** The destination to which a file should be uploaded. */ + /** The destination for the output file(s). */ destination: OutputFileDestinationOutput; - /** - * Details about an output file upload operation, including under what conditions - * to perform the upload. - */ + /** Additional options for the upload operation, including under what conditions to perform the upload. */ uploadOptions: OutputFileUploadOptionsOutput; } /** The destination to which a file should be uploaded. 
*/ export interface OutputFileDestinationOutput { - /** Specifies a file upload destination within an Azure blob storage container. */ + /** A location in Azure blob storage to which files are uploaded. */ container?: OutputFileBlobContainerDestinationOutput; } /** Specifies a file upload destination within an Azure blob storage container. */ export interface OutputFileBlobContainerDestinationOutput { - /** - * If filePattern refers to a specific file (i.e. contains no wildcards), then - * path is the name of the blob to which to upload that file. If filePattern - * contains one or more wildcards (and therefore may match multiple files), then - * path is the name of the blob virtual directory (which is prepended to each blob - * name) to which to upload the file(s). If omitted, file(s) are uploaded to the - * root of the container with a blob name matching their file name. - */ + /** The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. */ path?: string; - /** - * If not using a managed identity, the URL must include a Shared Access Signature - * (SAS) granting write permissions to the container. - */ + /** The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. */ containerUrl: string; - /** The identity must have write access to the Azure Blob Storage container */ - identityReference?: ComputeNodeIdentityReferenceOutput; - /** - * These headers will be specified when uploading files to Azure Storage. Official - * document on allowed headers when uploading blobs: - * https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types - */ + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ + identityReference?: BatchNodeIdentityReferenceOutput; + /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ uploadHeaders?: Array; } /** An HTTP header name-value pair */ export interface HttpHeaderOutput { - /** The case-insensitive name of the header to be used while uploading output files */ + /** The case-insensitive name of the header to be used while uploading output files. */ name: string; - /** The value of the header to be used while uploading output files */ + /** The value of the header to be used while uploading output files. */ value?: string; } /** - * Details about an output file upload operation, including under what conditions + * Options for an output file upload operation, including under what conditions * to perform the upload. */ export interface OutputFileUploadOptionsOutput { /** - * The default is taskcompletion. 
+ * The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. * * Possible values: tasksuccess, taskfailure, taskcompletion */ @@ -1825,23 +1133,11 @@ export interface OutputFileUploadOptionsOutput { /** Execution constraints to apply to a Task. */ export interface TaskConstraintsOutput { - /** If this is not specified, there is no time limit on how long the Task may run. */ + /** The maximum elapsed time that the Task may run, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run. */ maxWallClockTime?: string; - /** - * The default is 7 days, i.e. the Task directory will be retained for 7 days - * unless the Compute Node is removed or the Job is deleted. - */ + /** The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */ retentionTime?: string; - /** - * Note that this value specifically controls the number of retries for the Task - * executable due to a nonzero exit code. The Batch service will try the Task - * once, and may then retry up to this limit. For example, if the maximum retry - * count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). - * If the maximum retry count is 0, the Batch service does not retry the Task - * after the first attempt. If the maximum retry count is -1, the Batch service - * retries the Task without limit, however this is not recommended for a start - * task or any task. The default value is 0 (no retries) - */ + /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). */ maxTaskRetryCount?: number; } @@ -1850,16 +1146,12 @@ export interface TaskConstraintsOutput { * service operations. */ export interface AuthenticationTokenSettingsOutput { - /** - * The authentication token grants access to a limited set of Batch service - * operations. Currently the only supported value for the access property is - * 'job', which grants access to all operations related to the Job which contains - * the Task. - */ + /** The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. */ access?: string[]; } /** + * A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. * You can use Job Preparation to prepare a Node to run Tasks for the Job. 
* Activities commonly performed in Job Preparation include: Downloading common * resource files used by all the Tasks in the Job. The Job Preparation Task can @@ -1887,80 +1179,28 @@ export interface AuthenticationTokenSettingsOutput { * running Tasks is to use some form of checkpointing. */ export interface JobPreparationTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. If you do not - * specify this property, the Batch service assigns a default value of - * 'jobpreparation'. No other Task in the Job can have the same ID as the Job - * Preparation Task. If you try to submit a Task with the same id, the Batch - * service rejects the request with error code TaskIdSameAsJobPreparationTask; if - * you are calling the REST API directly, the HTTP status code is 409 (Conflict). - */ + /** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ + /** The settings for the container under which the Job Preparation Task runs. 
When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettingsOutput; - /** - * Files listed under this element are located in the Task's working directory. - * There is a maximum size for the list of resource files. When the max size is - * exceeded, the request will fail and the response error code will be - * RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be - * reduced in size. This can be achieved using .zip files, Application Packages, - * or Docker Containers. - */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ resourceFiles?: Array; /** A list of environment variable settings for the Job Preparation Task. */ environmentSettings?: Array; - /** Execution constraints to apply to a Task. */ + /** Constraints that apply to the Job Preparation Task. */ constraints?: TaskConstraintsOutput; - /** - * If true and the Job Preparation Task fails on a Node, the Batch service retries - * the Job Preparation Task up to its maximum retry count (as specified in the - * constraints element). If the Task has still not completed successfully after - * all retries, then the Batch service will not schedule Tasks of the Job to the - * Node. The Node remains active and eligible to run Tasks of other Jobs. If - * false, the Batch service will not wait for the Job Preparation Task to - * complete. In this case, other Tasks of the Job can start executing on the - * Compute Node while the Job Preparation Task is still running; and even if the - * Job Preparation Task fails, new Tasks will continue to be scheduled on the - * Compute Node. The default value is true. - */ + /** Whether the Batch service should wait for the Job Preparation Task to complete successfully before scheduling any other Tasks of the Job on the Compute Node. A Job Preparation Task has completed successfully if it exits with exit code 0. If true and the Job Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as specified in the constraints element). If the Task has still not completed successfully after all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start executing on the Compute Node while the Job Preparation Task is still running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The default value is true. 
*/ waitForSuccess?: boolean; - /** - * If omitted, the Task runs as a non-administrative user unique to the Task on - * Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux - * Compute Nodes. - */ + /** The user identity under which the Job Preparation Task runs. If omitted, the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux Compute Nodes. */ userIdentity?: UserIdentityOutput; - /** - * The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - * the Job Preparation Task did not complete (e.g. because the reboot occurred - * while the Task was running). Therefore, you should always write a Job - * Preparation Task to be idempotent and to behave correctly if run multiple - * times. The default value is true. - */ + /** Whether the Batch service should rerun the Job Preparation Task after a Compute Node reboots. The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the reboot occurred while the Task was running). Therefore, you should always write a Job Preparation Task to be idempotent and to behave correctly if run multiple times. The default value is true. */ rerunOnNodeRebootAfterSuccess?: boolean; } /** + * A Job Release Task to run on Job completion on any Compute Node where the Job has run. * The Job Release Task runs when the Job ends, because of one of the following: * The user calls the Terminate Job API, or the Delete Job API while the Job is * still active, the Job's maximum wall clock time constraint is reached, and the @@ -1978,79 +1218,29 @@ export interface JobPreparationTaskOutput { * specified on the Pool. */ export interface JobReleaseTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores and cannot contain more than 64 characters. If you do not - * specify this property, the Batch service assigns a default value of - * 'jobrelease'. No other Task in the Job can have the same ID as the Job Release - * Task. If you try to submit a Task with the same id, the Batch service rejects - * the request with error code TaskIdSameAsJobReleaseTask; if you are calling the - * REST API directly, the HTTP status code is 409 (Conflict). - */ + /** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** - * The command line does not run under a shell, and therefore cannot take - * advantage of shell features such as environment variable expansion. If you want - * to take advantage of such features, you should invoke the shell in the command - * line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - * MyCommand" in Linux. 
If the command line refers to file paths, it should use a - * relative path (relative to the Task working directory), or use the Batch - * provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ + /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; - /** - * When this is specified, all directories recursively below the - * AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are - * mapped into the container, all Task environment variables are mapped into the - * container, and the Task command line is executed in the container. Files - * produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be - * reflected to the host disk, meaning that Batch file APIs will not be able to - * access those files. - */ + /** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettingsOutput; - /** Files listed under this element are located in the Task's working directory. */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. */ resourceFiles?: Array; /** A list of environment variable settings for the Job Release Task. */ environmentSettings?: Array; - /** - * The maximum elapsed time that the Job Release Task may run on a given Compute - * Node, measured from the time the Task starts. If the Task does not complete - * within the time limit, the Batch service terminates it. The default value is 15 - * minutes. You may not specify a timeout longer than 15 minutes. If you do, the - * Batch service rejects it with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). - */ + /** The maximum elapsed time that the Job Release Task may run on a given Compute Node, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. The default value is 15 minutes. You may not specify a timeout longer than 15 minutes. 
If you do, the Batch service rejects it with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ maxWallClockTime?: string; - /** - * The default is 7 days, i.e. the Task directory will be retained for 7 days - * unless the Compute Node is removed or the Job is deleted. - */ + /** The minimum time to retain the Task directory for the Job Release Task on the Compute Node. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */ retentionTime?: string; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ + /** The user identity under which the Job Release Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ userIdentity?: UserIdentityOutput; } /** Specifies how a Job should be assigned to a Pool. */ export interface PoolInformationOutput { - /** - * You must ensure that the Pool referenced by this property exists. If the Pool - * does not exist at the time the Batch service tries to schedule a Job, no Tasks - * for the Job will run until you create a Pool with that id. Note that the Batch - * service will not reject the Job request; it will simply not run Tasks until the - * Pool exists. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ + /** The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both. */ poolId?: string; - /** - * If auto Pool creation fails, the Batch service moves the Job to a completed - * state, and the Pool creation error is set in the Job's scheduling error - * property. The Batch service manages the lifetime (both creation and, unless - * keepAlive is specified, deletion) of the auto Pool. Any user actions that - * affect the lifetime of the auto Pool while the Job is active will result in - * unexpected behavior. You must specify either the Pool ID or the auto Pool - * specification, but not both. - */ + /** Characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. If auto Pool creation fails, the Batch service moves the Job to a completed state, and the Pool creation error is set in the Job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool specification, but not both. */ autoPoolSpecification?: AutoPoolSpecificationOutput; } @@ -2059,172 +1249,70 @@ export interface PoolInformationOutput { * create this auto Pool when the Job is submitted. */ export interface AutoPoolSpecificationOutput { - /** - * The Batch service assigns each auto Pool a unique identifier on creation. 
To - * distinguish between Pools created for different purposes, you can specify this - * element to add a prefix to the ID that is assigned. The prefix can be up to 20 - * characters long. - */ + /** A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. */ autoPoolIdPrefix?: string; /** - * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule - * are assigned to Pools. + * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. * * Possible values: jobschedule, job */ poolLifetimeOption: string; - /** - * If false, the Batch service deletes the Pool once its lifetime (as determined - * by the poolLifetimeOption setting) expires; that is, when the Job or Job - * Schedule completes. If true, the Batch service does not delete the Pool - * automatically. It is up to the user to delete auto Pools created with this - * option. - */ + /** Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. */ keepAlive?: boolean; - /** Specification for creating a new Pool. */ + /** The Pool specification for the auto Pool. */ pool?: PoolSpecificationOutput; } /** Specification for creating a new Pool. */ export interface PoolSpecificationOutput { - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - */ + /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ vmSize: string; - /** - * This property must be specified if the Pool needs to be created with Azure PaaS - * VMs. This property and virtualMachineConfiguration are mutually exclusive and - * one of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). This property cannot be specified if the - * Batch Account was created with its poolAllocationMode property set to - * 'UserSubscription'. - */ + /** The cloud service configuration for the Pool. This property must be specified if the Pool needs to be created with Azure PaaS VMs. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. 
If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ cloudServiceConfiguration?: CloudServiceConfigurationOutput; - /** - * This property must be specified if the Pool needs to be created with Azure IaaS - * VMs. This property and cloudServiceConfiguration are mutually exclusive and one - * of the properties must be specified. If neither is specified then the Batch - * service returns an error; if you are calling the REST API directly, the HTTP - * status code is 400 (Bad Request). - */ + /** The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ virtualMachineConfiguration?: VirtualMachineConfigurationOutput; - /** - * The default value is 1. The maximum value is the smaller of 4 times the number - * of cores of the vmSize of the pool or 256. - */ + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ taskSlotsPerNode?: number; - /** If not specified, the default is spread. */ + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ taskSchedulingPolicy?: TaskSchedulingPolicyOutput; - /** - * This timeout applies only to manual scaling; it has no effect when - * enableAutoScale is set to true. The default value is 15 minutes. The minimum - * value is 5 minutes. If you specify a value less than 5 minutes, the Batch - * service rejects the request with an error; if you are calling the REST API - * directly, the HTTP status code is 400 (Bad Request). - */ + /** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ resizeTimeout?: string; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. - */ + /** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ targetDedicatedNodes?: number; - /** - * This property must not be specified if enableAutoScale is set to true. If - * enableAutoScale is set to false, then you must set either targetDedicatedNodes, - * targetLowPriorityNodes, or both. - */ + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. 
If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ targetLowPriorityNodes?: number; - /** - * If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must - * be specified. If true, the autoScaleFormula element is required. The Pool - * automatically resizes according to the formula. The default value is false. - */ + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false. */ enableAutoScale?: boolean; - /** - * This property must not be specified if enableAutoScale is set to false. It is - * required if enableAutoScale is set to true. The formula is checked for validity - * before the Pool is created. If the formula is not valid, the Batch service - * rejects the request with detailed error information. - */ + /** The formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. */ autoScaleFormula?: string; - /** - * The default value is 15 minutes. The minimum and maximum value are 5 minutes - * and 168 hours respectively. If you specify a value less than 5 minutes or - * greater than 168 hours, the Batch service rejects the request with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). - */ + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ autoScaleEvaluationInterval?: string; - /** - * Enabling inter-node communication limits the maximum size of the Pool due to - * deployment restrictions on the Compute Nodes of the Pool. This may result in - * the Pool not reaching its desired size. The default value is false. - */ + /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */ enableInterNodeCommunication?: boolean; - /** The network configuration for a Pool. */ + /** The network configuration for the Pool. */ networkConfiguration?: NetworkConfigurationOutput; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. 
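// A minimal illustrative sketch (not part of the generated diff): a
// PoolSpecificationOutput shape exercising the autoscale fields documented
// above. The formula and VM size are hypothetical examples, not defaults.
const autoScaledPool: PoolSpecificationOutput = {
  vmSize: "standard_d2s_v3",
  enableAutoScale: true, // so targetDedicatedNodes/targetLowPriorityNodes are omitted
  autoScaleFormula:
    "$TargetDedicatedNodes = min(10, avg($PendingTasks.GetSample(TimeInterval_Minute * 5)))",
  autoScaleEvaluationInterval: "PT15M" // the default; allowed range is 5 minutes to 168 hours
};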
This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. */ startTask?: StartTaskOutput; /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. + * Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ certificateReferences?: Array; - /** - * When creating a pool, the package's application ID must be fully qualified - * (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - * Changes to Package references affect all new Nodes joining the Pool, but do not - * affect Compute Nodes that are already in the Pool until they are rebooted or - * reimaged. There is a maximum of 10 Package references on any given Pool. - */ + /** The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ applicationPackageReferences?: Array; - /** - * The list of application licenses must be a subset of available Batch service - * application licenses. If a license is requested which is not supported, Pool - * creation will fail. The permitted licenses available on the Pool are 'maya', - * 'vray', '3dsmax', 'arnold'. An additional charge applies for each application - * license added to the Pool. 
- */ + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. The permitted licenses available on the Pool are 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each application license added to the Pool. */ applicationLicenses?: string[]; /** The list of user Accounts to be created on each Compute Node in the Pool. */ userAccounts?: Array; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ + /** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; - /** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ + /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ mountConfiguration?: Array; /** - * If omitted, the default value is Default. + * The desired node communication mode for the pool. If omitted, the default value is Default. * * Possible values: default, classic, simplified */ @@ -2233,85 +1321,72 @@ export interface PoolSpecificationOutput { /** The network configuration for the Job. */ export interface JobNetworkConfigurationOutput { - /** - * The virtual network must be in the same region and subscription as the Azure - * Batch Account. The specified subnet should have enough free IP addresses to - * accommodate the number of Compute Nodes which will run Tasks from the Job. This - * can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' - * service principal must have the 'Classic Virtual Machine Contributor' - * Role-Based Access Control (RBAC) role for the specified VNet so that Azure - * Batch service can schedule Tasks on the Nodes. This can be verified by checking - * if the specified VNet has any associated Network Security Groups (NSG). If - * communication to the Nodes in the specified subnet is denied by an NSG, then - * the Batch service will set the state of the Compute Nodes to unusable. This is - * of the form - * /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - * If the specified VNet has any associated Network Security Groups (NSG), then a - * few reserved system ports must be enabled for inbound communication from the - * Azure Batch service. For Pools created with a Virtual Machine configuration, - * enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for - * Windows. Port 443 is also required to be open for outbound connections for - * communications to Azure Storage. For more details see: - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration - */ + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId: string; } /** Contains information about the execution of a Job in the Azure Batch service. */ export interface JobExecutionInformationOutput { - /** This is the time at which the Job was created. */ + /** The start time of the Job. This is the time at which the Job was created. */ startTime: string; - /** This property is set only if the Job is in the completed state. */ + /** The completion time of the Job. This property is set only if the Job is in the completed state. */ endTime?: string; - /** - * This element contains the actual Pool where the Job is assigned. When you get - * Job details from the service, they also contain a poolInfo element, which - * contains the Pool configuration data from when the Job was added or updated. - * That poolInfo element may also contain a poolId element. If it does, the two - * IDs are the same. If it does not, it means the Job ran on an auto Pool, and - * this property contains the ID of that auto Pool. - */ + /** The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool. */ poolId?: string; - /** This property is not set if there was no error starting the Job. */ + /** Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job. */ schedulingError?: JobSchedulingErrorOutput; - /** - * This property is set only if the Job is in the completed state. If the Batch - * service terminates the Job, it sets the reason as follows: JMComplete - the Job - * Manager Task completed, and killJobOnCompletion was set to true. - * MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. - * TerminateJobSchedule - the Job ran as part of a schedule, and the schedule - * terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to - * terminatejob, and all Tasks in the Job are complete. 
TaskFailed - the Job's - * onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in - * the Job failed with an exit condition that specified a jobAction of - * terminatejob. Any other string is a user-defined reason specified in a call to - * the 'Terminate a Job' operation. - */ + /** A string describing the reason the Job ended. This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation. */ terminateReason?: string; } /** An error encountered by the Batch service when scheduling a Job. */ export interface JobSchedulingErrorOutput { /** - * The category of the error. + * The category of the Job scheduling error. * * Possible values: usererror, servererror */ category: string; - /** - * An identifier for the Job scheduling error. Codes are invariant and are - * intended to be consumed programmatically. - */ + /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; - /** - * A message describing the Job scheduling error, intended to be suitable for - * display in a user interface. - */ + /** A message describing the Job scheduling error, intended to be suitable for display in a user interface. */ message?: string; /** A list of additional error details related to the scheduling error. */ details?: Array; } +/** Resource usage statistics for a Job. */ +export interface JobStatisticsOutput { + /** The URL of the statistics. */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: string; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: string; + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ + userCPUTime: string; + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ + kernelCPUTime: string; + /** The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */ + wallClockTime: string; + /** The total number of disk read operations made by all Tasks in the Job. */ + readIOps: number; + /** The total number of disk write operations made by all Tasks in the Job. */ + writeIOps: number; + /** The total amount of data in GiB read from disk by all Tasks in the Job. */ + readIOGiB: number; + /** The total amount of data in GiB written to disk by all Tasks in the Job. 
*/ + writeIOGiB: number; + /** The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. */ + numSucceededTasks: number; + /** The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. */ + numFailedTasks: number; + /** The total number of retries on all the Tasks in the Job during the given time range. */ + numTaskRetries: number; + /** The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */ + waitTime: string; +} + /** The result of listing the Jobs in an Account. */ export interface BatchJobListResultOutput { /** The list of Jobs. */ @@ -2339,12 +1414,9 @@ export interface JobPreparationAndReleaseTaskExecutionInformationOutput { nodeId?: string; /** The URL of the Compute Node to which this entry refers. */ nodeUrl?: string; - /** - * Contains information about the execution of a Job Preparation Task on a Compute - * Node. - */ + /** Information about the execution status of the Job Preparation Task on this Compute Node. */ jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformationOutput; - /** This property is set only if the Job Release Task has run on the Compute Node. */ + /** Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. */ jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformationOutput; } @@ -2353,12 +1425,9 @@ export interface JobPreparationAndReleaseTaskExecutionInformationOutput { * Node. */ export interface JobPreparationTaskExecutionInformationOutput { - /** - * If the Task has been restarted or retried, this is the most recent time at - * which the Task started running. - */ + /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */ startTime: string; - /** This property is set only if the Task is in the Completed state. */ + /** The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. */ endTime?: string; /** * The current state of the Job Preparation Task on the Compute Node. @@ -2366,46 +1435,22 @@ export interface JobPreparationTaskExecutionInformationOutput { * Possible values: running, completed */ state: string; - /** - * The root directory of the Job Preparation Task on the Compute Node. You can use - * this path to retrieve files created by the Task, such as log files. - */ + /** The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ taskRootDirectory?: string; /** The URL to the root directory of the Job Preparation Task on the Compute Node. */ taskRootDirectoryUrl?: string; - /** - * This parameter is returned only if the Task is in the completed state. The exit - * code for a process reflects the specific convention implemented by the - * application developer for that process. 
If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. Note that the exit code may also be generated by the - * Compute Node operating system, such as when a process is forcibly terminated. - */ + /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */ exitCode?: number; - /** This property is set only if the Task runs in a container context. */ + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: TaskFailureInformationOutput; - /** - * Task application failures (non-zero exit code) are retried, pre-processing - * errors (the Task could not be run) and file upload errors are not retried. The - * Batch service will retry the Task up to the limit specified by the constraints. - */ + /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ retryCount: number; - /** - * This property is set only if the Task was retried (i.e. retryCount is nonzero). - * If present, this is typically the same as startTime, but may be different if - * the Task has been restarted for reasons other than retry; for example, if the - * Compute Node was rebooted during a retry, then the startTime is updated but the - * lastRetryTime is not. - */ + /** The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ lastRetryTime?: string; /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. * * Possible values: success, failure */ @@ -2416,35 +1461,23 @@ export interface JobPreparationTaskExecutionInformationOutput { export interface TaskContainerExecutionInformationOutput { /** The ID of the container. 
*/ containerId?: string; - /** - * This is the state of the container according to the Docker service. It is - * equivalent to the status field returned by "docker inspect". - */ + /** The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect". */ state?: string; - /** - * This is the detailed error string from the Docker service, if available. It is - * equivalent to the error field returned by "docker inspect". - */ + /** Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect". */ error?: string; } /** Information about a Task failure. */ export interface TaskFailureInformationOutput { /** - * The category of the error. + * The category of the Task error. * * Possible values: usererror, servererror */ category: string; - /** - * An identifier for the Task error. Codes are invariant and are intended to be - * consumed programmatically. - */ + /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; - /** - * A message describing the Task error, intended to be suitable for display in a - * user interface. - */ + /** A message describing the Task error, intended to be suitable for display in a user interface. */ message?: string; /** A list of additional details related to the error. */ details?: Array; @@ -2455,12 +1488,9 @@ export interface TaskFailureInformationOutput { * Node. */ export interface JobReleaseTaskExecutionInformationOutput { - /** - * If the Task has been restarted or retried, this is the most recent time at - * which the Task started running. - */ + /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */ startTime: string; - /** This property is set only if the Task is in the Completed state. */ + /** The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. */ endTime?: string; /** * The current state of the Job Release Task on the Compute Node. @@ -2468,32 +1498,18 @@ export interface JobReleaseTaskExecutionInformationOutput { * Possible values: running, completed */ state: string; - /** - * The root directory of the Job Release Task on the Compute Node. You can use - * this path to retrieve files created by the Task, such as log files. - */ + /** The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ taskRootDirectory?: string; /** The URL to the root directory of the Job Release Task on the Compute Node. */ taskRootDirectoryUrl?: string; - /** - * This parameter is returned only if the Task is in the completed state. The exit - * code for a process reflects the specific convention implemented by the - * application developer for that process. If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. Note that the exit code may also be generated by the - * Compute Node operating system, such as when a process is forcibly terminated. - */ + /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. 
The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */ exitCode?: number; - /** This property is set only if the Task runs in a container context. */ + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: TaskFailureInformationOutput; /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. * * Possible values: success, failure */ @@ -2502,9 +1518,9 @@ export interface JobReleaseTaskExecutionInformationOutput { /** The Task and TaskSlot counts for a Job. */ export interface TaskCountsResultOutput { - /** The Task counts for a Job. */ - readonly taskCounts: TaskCountsOutput; - /** The TaskSlot counts for a Job. */ + /** The number of Tasks per state. */ + taskCounts: TaskCountsOutput; + /** The number of TaskSlots required by Tasks per state. */ taskSlotCounts: TaskSlotCountsOutput; } @@ -2516,15 +1532,9 @@ export interface TaskCountsOutput { running: number; /** The number of Tasks in the completed state. */ completed: number; - /** - * The number of Tasks which succeeded. A Task succeeds if its result (found in - * the executionInfo property) is 'success'. - */ + /** The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'. */ succeeded: number; - /** - * The number of Tasks which failed. A Task fails if its result (found in the - * executionInfo property) is 'failure'. - */ + /** The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'. */ failed: number; } @@ -2546,14 +1556,11 @@ export interface TaskSlotCountsOutput { * A Certificate that can be installed on Compute Nodes and can be used to * authenticate operations on the machine. */ -export interface CertificateOutput { - /** - * The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex - * digits. - */ - thumbprint?: string; - /** The algorithm used to derive the thumbprint. */ - thumbprintAlgorithm?: string; +export interface BatchCertificateOutput { + /** The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). */ + thumbprint: string; + /** The algorithm used to derive the thumbprint. This must be sha1. */ + thumbprintAlgorithm: string; /** The URL of the Certificate. */ readonly url?: string; /** @@ -2565,122 +1572,66 @@ export interface CertificateOutput { /** The time at which the Certificate entered its current state. */ readonly stateTransitionTime?: string; /** - * This property is not set if the Certificate is in its initial active state. + * The previous state of the Certificate. 
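// A minimal illustrative sketch (not part of the generated diff): consuming
// the TaskCountsResultOutput shape documented above. The helper name is hypothetical.
function describeTaskCounts(result: TaskCountsResultOutput): string {
  const { taskCounts } = result;
  return (
    `${taskCounts.active} active, ${taskCounts.running} running, ` +
    `${taskCounts.completed} completed (${taskCounts.succeeded} succeeded, ` +
    `${taskCounts.failed} failed)`
  );
}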
This property is not set if the Certificate is in its initial active state. * * Possible values: active, deleting, deletefailed */ readonly previousState?: string; - /** This property is not set if the Certificate is in its initial Active state. */ + /** The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. */ readonly previousStateTransitionTime?: string; /** The public part of the Certificate as a base-64 encoded .cer file. */ readonly publicData?: string; - /** This property is set only if the Certificate is in the DeleteFailed state. */ + /** The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. */ readonly deleteCertificateError?: DeleteCertificateErrorOutput; /** The base64-encoded contents of the Certificate. The maximum size is 10KB. */ - data?: string; + data: string; /** * The format of the Certificate data. * * Possible values: pfx, cer */ certificateFormat?: string; - /** This must be omitted if the Certificate format is cer. */ + /** The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. */ password?: string; } /** An error encountered by the Batch service when deleting a Certificate. */ export interface DeleteCertificateErrorOutput { - /** - * An identifier for the Certificate deletion error. Codes are invariant and are - * intended to be consumed programmatically. - */ + /** An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; - /** - * A message describing the Certificate deletion error, intended to be suitable - * for display in a user interface. - */ + /** A message describing the Certificate deletion error, intended to be suitable for display in a user interface. */ message?: string; - /** - * This list includes details such as the active Pools and Compute Nodes - * referencing this Certificate. However, if a large number of resources reference - * the Certificate, the list contains only about the first hundred. - */ + /** A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. */ values?: Array; } /** The result of listing the Certificates in the Account. */ export interface CertificateListResultOutput { /** The list of Certificates. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** - * The result of listing the files on a Compute Node, or the files associated with - * a Task on a Compute Node. - */ -export interface NodeFileListResultOutput { - /** The list of files. */ - value?: Array; + value?: Array; /** The URL to get the next set of results. */ "odata.nextLink"?: string; } -/** Information about a file or directory on a Compute Node. */ -export interface NodeFileOutput { - /** The file path. */ - name?: string; - /** The URL of the file. */ - url?: string; - /** Whether the object represents a directory. */ - isDirectory?: boolean; - /** The properties of a file on a Compute Node. */ - properties?: FilePropertiesOutput; -} - -/** The properties of a file on a Compute Node. 
*/ -export interface FilePropertiesOutput { - /** The creation time is not returned for files on Linux Compute Nodes. */ - creationTime?: string; - /** The time at which the file was last modified. */ - lastModified: string; - /** The length of the file. */ - contentLength: number; - /** The content type of the file. */ - contentType?: string; - /** The file mode is returned only for files on Linux Compute Nodes. */ - fileMode?: string; -} - /** * A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a * specification used to create each Job. */ export interface BatchJobScheduleOutput { /** A string that uniquely identifies the schedule within the Account. */ - id?: string; + readonly id?: string; /** The display name for the schedule. */ - displayName?: string; + readonly displayName?: string; /** The URL of the Job Schedule. */ readonly url?: string; - /** - * This is an opaque string. You can use it to detect whether the Job Schedule has - * changed between requests. In particular, you can be pass the ETag with an - * Update Job Schedule request to specify that your changes should take effect - * only if nobody else has modified the schedule in the meantime. - */ + /** The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. */ readonly eTag?: string; - /** - * This is the last time at which the schedule level data, such as the Job - * specification or recurrence information, changed. It does not factor in - * job-level changes such as new Jobs being created or Jobs changing state. - */ + /** The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. */ readonly lastModified?: string; /** The creation time of the Job Schedule. */ readonly creationTime?: string; /** - * The state of the Job Schedule. + * The current state of the Job Schedule. * * Possible values: active, completed, disabled, terminating, deleting */ @@ -2688,31 +1639,22 @@ export interface BatchJobScheduleOutput { /** The time at which the Job Schedule entered the current state. */ readonly stateTransitionTime?: string; /** - * This property is not present if the Job Schedule is in its initial active state. + * The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state. * * Possible values: active, completed, disabled, terminating, deleting */ readonly previousState?: string; - /** This property is not present if the Job Schedule is in its initial active state. */ + /** The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state. */ readonly previousStateTransitionTime?: string; - /** - * All times are fixed respective to UTC and are not impacted by daylight saving - * time. - */ - schedule?: ScheduleOutput; - /** Specifies details of the Jobs to be created on a schedule. */ - jobSpecification?: JobSpecificationOutput; - /** - * Contains information about Jobs that have been and will be run under a Job - * Schedule. - */ + /** The schedule according to which Jobs will be created. 
All times are fixed respective to UTC and are not impacted by daylight saving time. */ + schedule: ScheduleOutput; + /** The details of the Jobs to be created on this schedule. */ + jobSpecification: JobSpecificationOutput; + /** Information about Jobs that have been and will be run under this schedule. */ readonly executionInfo?: JobScheduleExecutionInformationOutput; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ + /** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; - /** Resource usage statistics for a Job Schedule. */ + /** The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ readonly stats?: JobScheduleStatisticsOutput; } @@ -2721,137 +1663,55 @@ export interface BatchJobScheduleOutput { * respective to UTC and are not impacted by daylight saving time. */ export interface ScheduleOutput { - /** - * If you do not specify a doNotRunUntil time, the schedule becomes ready to - * create Jobs immediately. - */ + /** The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately. */ doNotRunUntil?: string; - /** - * If you do not specify a doNotRunAfter time, and you are creating a recurring - * Job Schedule, the Job Schedule will remain active until you explicitly - * terminate it. - */ + /** A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it. */ doNotRunAfter?: string; - /** - * If a Job is not created within the startWindow interval, then the 'opportunity' - * is lost; no Job will be created until the next recurrence of the schedule. If - * the schedule is recurring, and the startWindow is longer than the recurrence - * interval, then this is equivalent to an infinite startWindow, because the Job - * that is 'due' in one recurrenceInterval is not carried forward into the next - * recurrence interval. The default is infinite. The minimum value is 1 minute. If - * you specify a lower value, the Batch service rejects the schedule with an - * error; if you are calling the REST API directly, the HTTP status code is 400 - * (Bad Request). - */ + /** The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). 
*/ startWindow?: string; - /** - * Because a Job Schedule can have at most one active Job under it at any given - * time, if it is time to create a new Job under a Job Schedule, but the previous - * Job is still running, the Batch service will not create the new Job until the - * previous Job finishes. If the previous Job does not finish within the - * startWindow period of the new recurrenceInterval, then no new Job will be - * scheduled for that interval. For recurring Jobs, you should normally specify a - * jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you - * will need an external process to monitor when Jobs are created, add Tasks to - * the Jobs and terminate the Jobs ready for the next recurrence. The default is - * that the schedule does not recur: one Job is created, within the startWindow - * after the doNotRunUntil time, and the schedule is complete as soon as that Job - * finishes. The minimum value is 1 minute. If you specify a lower value, the - * Batch service rejects the schedule with an error; if you are calling the REST - * API directly, the HTTP status code is 400 (Bad Request). - */ + /** The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time. If it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ recurrenceInterval?: string; } /** Specifies details of the Jobs to be created on a schedule. */ export interface JobSpecificationOutput { - /** - * Priority values can range from -1000 to 1000, with -1000 being the lowest - * priority and 1000 being the highest priority. The default value is 0. This - * priority is used as the default for all Jobs under the Job Schedule. You can - * update a Job's priority after it has been created using by using the update Job - * API. - */ + /** The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created by using the update Job API. */ priority?: number; - /** - * If the value is set to True, other high priority jobs submitted to the system - * will take precedence and will be able requeue tasks from this job. You can - * update a job's allowTaskPreemption after it has been created using the update - * job API. 
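// A minimal illustrative sketch (not part of the generated diff): a recurring
// ScheduleOutput shape per the docs above. The values are hypothetical; the
// service represents these intervals as ISO 8601 duration strings.
const hourlySchedule: ScheduleOutput = {
  doNotRunUntil: "2024-01-01T00:00:00Z", // ready to create Jobs from this time
  recurrenceInterval: "PT1H", // one Job per hour; the minimum is one minute
  startWindow: "PT10M" // the 'opportunity' lapses if no Job is created within 10 minutes
};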
- */ + /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ allowTaskPreemption?: boolean; - /** - * The value of maxParallelTasks must be -1 or greater than 0 if specified. If not - * specified, the default value is -1, which means there's no limit to the number - * of tasks that can be run at once. You can update a job's maxParallelTasks after - * it has been created using the update job API. - */ + /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ maxParallelTasks?: number; - /** - * The name need not be unique and can contain any Unicode characters up to a - * maximum length of 1024. - */ + /** The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** - * Whether Tasks in the Job can define dependencies on each other. The default is - * false. - */ + /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ usesTaskDependencies?: boolean; /** - * Note that if a Job contains no Tasks, then all Tasks are considered complete. - * This option is therefore most commonly used with a Job Manager task; if you - * want to use automatic Job termination without a Job Manager, you should - * initially set onAllTasksComplete to noaction and update the Job properties to - * set onAllTasksComplete to terminatejob once you have finished adding Tasks. The - * default is noaction. + * The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. * * Possible values: noaction, terminatejob */ onAllTasksComplete?: string; /** - * The default is noaction. + * The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. * * Possible values: noaction, performexitoptionsjobaction */ onTaskFailure?: string; /** The network configuration for the Job. */ networkConfiguration?: JobNetworkConfigurationOutput; - /** The execution constraints for a Job. */ + /** The execution constraints for Jobs created under this schedule. */ constraints?: JobConstraintsOutput; - /** - * If the Job does not specify a Job Manager Task, the user must explicitly add - * Tasks to the Job using the Task API. 
If the Job does specify a Job Manager - * Task, the Batch service creates the Job Manager Task when the Job is created, - * and will try to schedule the Job Manager Task before scheduling other Tasks in - * the Job. - */ + /** The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. */ jobManagerTask?: JobManagerTaskOutput; - /** - * If a Job has a Job Preparation Task, the Batch service will run the Job - * Preparation Task on a Node before starting any Tasks of that Job on that - * Compute Node. - */ + /** The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */ jobPreparationTask?: JobPreparationTaskOutput; - /** - * The primary purpose of the Job Release Task is to undo changes to Nodes made by - * the Job Preparation Task. Example activities include deleting local files, or - * shutting down services that were started as part of Job preparation. A Job - * Release Task cannot be specified without also specifying a Job Preparation Task - * for the Job. The Batch service runs the Job Release Task on the Compute Nodes - * that have run the Job Preparation Task. - */ + /** The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task. */ jobReleaseTask?: JobReleaseTaskOutput; - /** - * Individual Tasks can override an environment setting specified here by - * specifying the same setting name with a different value. - */ + /** A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ commonEnvironmentSettings?: Array; - /** Specifies how a Job should be assigned to a Pool. */ + /** The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. */ poolInfo: PoolInformationOutput; - /** - * The Batch service does not assign any meaning to metadata; it is solely for the - * use of user code. - */ + /** A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; } @@ -2860,18 +1720,11 @@ export interface JobSpecificationOutput { * Schedule. */ export interface JobScheduleExecutionInformationOutput { - /** - * This property is meaningful only if the schedule is in the active state when - * the time comes around. 
For example, if the schedule is disabled, no Job will be - * created at nextRunTime unless the Job is enabled before then. - */ + /** The next time at which a Job will be created under this schedule. This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before then. */ nextRunTime?: string; - /** - * This property is present only if the at least one Job has run under the - * schedule. - */ + /** Information about the most recent Job under the Job Schedule. This property is present only if at least one Job has run under the schedule. */ recentJob?: RecentJobOutput; - /** This property is set only if the Job Schedule is in the completed state. */ + /** The time at which the schedule ended. This property is set only if the Job Schedule is in the completed state. */ endTime?: string; } @@ -2889,69 +1742,29 @@ export interface JobScheduleStatisticsOutput { url: string; /** The start time of the time range covered by the statistics. */ startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ lastUpdateTime: string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in all Jobs created under the schedule. - */ + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. */ userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by all Tasks in all Jobs created under the schedule. - */ + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. */ kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on a - * Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If a Task was retried, this - * includes the wall clock time of all the Task retries. - */ + /** The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */ wallClockTime: string; - /** - * The total number of disk read operations made by all Tasks in all Jobs created - * under the schedule. - */ + /** The total number of disk read operations made by all Tasks in all Jobs created under the schedule. */ readIOps: number; - /** - * The total number of disk write operations made by all Tasks in all Jobs created - * under the schedule. - */ + /** The total number of disk write operations made by all Tasks in all Jobs created under the schedule. */ writeIOps: number; - /** - * The total gibibytes read from disk by all Tasks in all Jobs created under the - * schedule. - */ + /** The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. 
*/ readIOGiB: number; - /** - * The total gibibytes written to disk by all Tasks in all Jobs created under the - * schedule. - */ + /** The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. */ writeIOGiB: number; - /** - * The total number of Tasks successfully completed during the given time range in - * Jobs created under the schedule. A Task completes successfully if it returns - * exit code 0. - */ + /** The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. */ numSucceededTasks: number; - /** - * The total number of Tasks that failed during the given time range in Jobs - * created under the schedule. A Task fails if it exhausts its maximum retry count - * without returning exit code 0. - */ + /** The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. */ numFailedTasks: number; - /** - * The total number of retries during the given time range on all Tasks in all - * Jobs created under the schedule. - */ + /** The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. */ numTaskRetries: number; - /** - * This value is only reported in the Account lifetime statistics; it is not - * included in the Job statistics. - */ + /** The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */ waitTime: string; } @@ -2963,183 +1776,53 @@ export interface BatchJobScheduleListResultOutput { "odata.nextLink"?: string; } -/** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. - */ -export interface BatchTaskOutput { - /** - * The ID can contain any combination of alphanumeric characters including hyphens - * and underscores, and cannot contain more than 64 characters. - */ - id?: string; - /** - * The display name need not be unique and can contain any Unicode characters up - * to a maximum length of 1024. - */ +/** Options for creating an Azure Batch Task. */ +export interface BatchTaskCreateOptionsOutput { + /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). */ + id: string; + /** A display name for the Task.
The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** The URL of the Task. */ - readonly url?: string; - /** - * This is an opaque string. You can use it to detect whether the Task has changed - * between requests. In particular, you can be pass the ETag when updating a Task - * to specify that your changes should take effect only if nobody else has - * modified the Task in the meantime. - */ - readonly eTag?: string; - /** The last modified time of the Task. */ - readonly lastModified?: string; - /** The creation time of the Task. */ - readonly creationTime?: string; /** How the Batch service should respond when the Task completes. */ exitConditions?: ExitConditionsOutput; - /** - * The state of the Task. - * - * Possible values: active, preparing, running, completed - */ - readonly state?: string; - /** The time at which the Task entered its current state. */ - readonly stateTransitionTime?: string; - /** - * This property is not set if the Task is in its initial Active state. - * - * Possible values: active, preparing, running, completed - */ - readonly previousState?: string; - /** This property is not set if the Task is in its initial Active state. */ - readonly previousStateTransitionTime?: string; - /** - * For multi-instance Tasks, the command line is executed as the primary Task, - * after the primary Task and all subtasks have finished executing the - * coordination command line. The command line does not run under a shell, and - * therefore cannot take advantage of shell features such as environment variable - * expansion. If you want to take advantage of such features, you should invoke - * the shell in the command line, for example using "cmd /c MyCommand" in - * Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - * file paths, it should use a relative path (relative to the Task working - * directory), or use the Batch provided environment variable - * (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - */ - commandLine?: string; - /** - * If the Pool that will run this Task has containerConfiguration set, this must - * be set as well. If the Pool that will run this Task doesn't have - * containerConfiguration set, this must not be set. When this is specified, all - * directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure - * Batch directories on the node) are mapped into the container, all Task - * environment variables are mapped into the container, and the Task command line - * is executed in the container. Files produced in the container outside of - * AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that - * Batch file APIs will not be able to access those files. - */ + /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: TaskContainerSettingsOutput; - /** - * For multi-instance Tasks, the resource files will only be downloaded to the - * Compute Node on which the primary Task is executed. There is a maximum size for - * the list of resource files. When the max size is exceeded, the request will - * fail and the response error code will be RequestEntityTooLarge. If this occurs, - * the collection of ResourceFiles must be reduced in size. This can be achieved - * using .zip files, Application Packages, or Docker Containers. - */ + /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ resourceFiles?: Array; - /** - * For multi-instance Tasks, the files will only be uploaded from the Compute Node - * on which the primary Task is executed. - */ + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ outputFiles?: Array; /** A list of environment variable settings for the Task. */ environmentSettings?: Array; - /** - * A locality hint that can be used by the Batch service to select a Compute Node - * on which to start a Task. - */ + /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */ affinityInfo?: AffinityInformationOutput; - /** Execution constraints to apply to a Task. */ + /** The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. */ constraints?: TaskConstraintsOutput; - /** - * The default is 1. A Task can only be scheduled to run on a compute node if the - * node has enough free scheduling slots available. For multi-instance Tasks, this - * must be 1. - */ + /** The number of scheduling slots that the Task requires to run. The default is 1.
A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */ requiredSlots?: number; - /** If omitted, the Task runs as a non-administrative user unique to the Task. */ + /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ userIdentity?: UserIdentityOutput; - /** Information about the execution of a Task. */ - readonly executionInfo?: TaskExecutionInformationOutput; - /** Information about the Compute Node on which a Task ran. */ - readonly nodeInfo?: ComputeNodeInformationOutput; - /** - * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, - * if any of the subtasks fail (for example due to exiting with a non-zero exit - * code) the entire multi-instance Task fails. The multi-instance Task is then - * terminated and retried, up to its retry limit. - */ + /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */ multiInstanceSettings?: MultiInstanceSettingsOutput; - /** Resource usage statistics for a Task. */ - readonly stats?: TaskStatisticsOutput; - /** - * This Task will not be scheduled until all Tasks that it depends on have - * completed successfully. If any of those Tasks fail and exhaust their retry - * counts, this Task will never be scheduled. - */ + /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. */ dependsOn?: TaskDependenciesOutput; - /** - * Application packages are downloaded and deployed to a shared directory, not the - * Task working directory. Therefore, if a referenced package is already on the - * Node, and is up to date, then it is not re-downloaded; the existing copy on the - * Compute Node is used. If a referenced Package cannot be installed, for example - * because the package has been deleted or because download failed, the Task - * fails. - */ + /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */ applicationPackageReferences?: Array; - /** - * If this property is set, the Batch service provides the Task with an - * authentication token which can be used to authenticate Batch service operations - * without requiring an Account access key. The token is provided via the - * AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the - * Task can carry out using the token depend on the settings. For example, a Task - * can request Job permissions in order to add other Tasks to the Job, or check - * the status of the Job or of other Tasks under the Job. - */ + /** The settings for an authentication token that the Task can use to perform Batch service operations. 
If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ authenticationTokenSettings?: AuthenticationTokenSettingsOutput; } /** Specifies how the Batch service should respond when the Task completes. */ export interface ExitConditionsOutput { - /** - * A list of individual Task exit codes and how the Batch service should respond - * to them. - */ + /** A list of individual Task exit codes and how the Batch service should respond to them. */ exitCodes?: Array; - /** - * A list of Task exit code ranges and how the Batch service should respond to - * them. - */ + /** A list of Task exit code ranges and how the Batch service should respond to them. */ exitCodeRanges?: Array; - /** Specifies how the Batch service responds to a particular exit condition. */ + /** How the Batch service should respond if the Task fails to start due to an error. */ preProcessingError?: ExitOptionsOutput; - /** - * If the Task exited with an exit code that was specified via exitCodes or - * exitCodeRanges, and then encountered a file upload error, then the action - * specified by the exit code takes precedence. - */ + /** How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. */ fileUploadError?: ExitOptionsOutput; - /** - * This value is used if the Task exits with any nonzero exit code not listed in - * the exitCodes or exitCodeRanges collection, with a pre-processing error if the - * preProcessingError property is not present, or with a file upload error if the - * fileUploadError property is not present. If you want non-default behavior on - * exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges - * collection. - */ + /** How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. */ default?: ExitOptionsOutput; } @@ -3150,26 +1833,20 @@ export interface ExitConditionsOutput { export interface ExitCodeMappingOutput { /** A process exit code. */ code: number; - /** Specifies how the Batch service responds to a particular exit condition. */ + /** How the Batch service should respond if the Task exits with this exit code. */ exitOptions: ExitOptionsOutput; } /** Specifies how the Batch service responds to a particular exit condition. */ export interface ExitOptionsOutput { /** - * The default is none for exit code 0 and terminate for all other exit - * conditions. 
If the Job's onTaskFailed property is noaction, then specifying - * this property returns an error and the add Task request fails with an invalid - * property value error; if you are calling the REST API directly, the HTTP status - * code is 400 (Bad Request). + * An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). * * Possible values: none, disable, terminate */ jobAction?: string; /** - * Possible values are 'satisfy' (allowing dependent tasks to progress) and - * 'block' (dependent tasks continue to wait). Batch does not yet support - * cancellation of dependent tasks. + * An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. * * Possible values: satisfy, block */ @@ -3177,87 +1854,171 @@ export interface ExitOptionsOutput { } /** - * A range of exit codes and how the Batch service should respond to exit codes - * within that range. + * A range of exit codes and how the Batch service should respond to exit codes + * within that range. + */ +export interface ExitCodeRangeMappingOutput { + /** The first exit code in the range. */ + start: number; + /** The last exit code in the range. */ + end: number; + /** How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). */ + exitOptions: ExitOptionsOutput; +} + +/** + * A locality hint that can be used by the Batch service to select a Compute Node + * on which to start a Task. + */ +export interface AffinityInformationOutput { + /** An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ + affinityId: string; +} + +/** + * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, + * if any of the subtasks fail (for example due to exiting with a non-zero exit + * code) the entire multi-instance Task fails. The multi-instance Task is then + * terminated and retried, up to its retry limit. + */ +export interface MultiInstanceSettingsOutput { + /** The number of Compute Nodes required by the Task. If omitted, the default is 1. */ + numberOfInstances?: number; + /** The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. */ + coordinationCommandLine: string; + /** A list of files that the Batch service will download before running the coordination command line. 
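To make the ExitConditions/ExitOptions contract above concrete, a minimal TypeScript sketch (illustrative values only; note that jobAction is honored only when the Job's onTaskFailed is 'performExitOptionsJobAction'):

const exitConditions = {
  // Exit code 1 is treated as benign: leave the Job alone, let dependents proceed.
  exitCodes: [{ code: 1, exitOptions: { jobAction: "none", dependencyAction: "satisfy" } }],
  // Codes 2 through 10 (inclusive) terminate the Job and hold dependents back.
  exitCodeRanges: [
    { start: 2, end: 10, exitOptions: { jobAction: "terminate", dependencyAction: "block" } }
  ],
  // Any other nonzero exit code, pre-processing error, or file upload error
  // not matched above falls through to this default.
  default: { jobAction: "terminate", dependencyAction: "block" }
};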
The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + commonResourceFiles?: Array; +} + +/** + * Specifies any dependencies of a Task. Any Task that is explicitly specified or + * within a dependency range must complete before the dependent Task will be + * scheduled. + */ +export interface TaskDependenciesOutput { + /** The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead. */ + taskIds?: string[]; + /** The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled. */ + taskIdRanges?: Array; +} + +/** + * The start and end of the range are inclusive. For example, if a range has start + * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. */ -export interface ExitCodeRangeMappingOutput { - /** The first exit code in the range. */ +export interface TaskIdRangeOutput { + /** The first Task ID in the range. */ start: number; - /** The last exit code in the range. */ + /** The last Task ID in the range. */ end: number; - /** Specifies how the Batch service responds to a particular exit condition. */ - exitOptions: ExitOptionsOutput; +} + +/** The result of listing the Tasks in a Job. */ +export interface BatchTaskListResultOutput { + /** The list of Tasks. */ + value?: Array; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; } /** - * A locality hint that can be used by the Batch service to select a Compute Node - * on which to start a Task. + * Batch will retry Tasks when a recovery operation is triggered on a Node. + * Examples of recovery operations include (but are not limited to) when an + * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + * Retries due to recovery operations are independent of and are not counted + * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + * retry due to a recovery operation may occur. Because of this, all Tasks should + * be idempotent. This means Tasks need to tolerate being interrupted and + * restarted without causing any corruption or duplicate data. The best practice + * for long running Tasks is to use some form of checkpointing. */ -export interface AffinityInformationOutput { +export interface BatchTaskOutput { + /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters.
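The dependency shapes just defined compose as follows; a minimal sketch with invented Task IDs (per TaskIdRangeOutput, the range below covers Tasks '9' through '12' inclusive):

const dependsOn = {
  // This Task is not schedulable until "setup" and Tasks 9-12 all complete
  // successfully; if any of them exhausts its retries, it never runs.
  taskIds: ["setup"],
  taskIdRanges: [{ start: 9, end: 12 }]
};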
*/ + readonly id?: string; + /** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + readonly displayName?: string; + /** The URL of the Task. */ + readonly url?: string; + /** The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. */ + readonly eTag?: string; + /** The last modified time of the Task. */ + readonly lastModified?: string; + /** The creation time of the Task. */ + readonly creationTime?: string; + /** How the Batch service should respond when the Task completes. */ + readonly exitConditions?: ExitConditionsOutput; + /** + * The current state of the Task. + * + * Possible values: active, preparing, running, completed + */ + readonly state?: string; + /** The time at which the Task entered its current state. */ + readonly stateTransitionTime?: string; /** - * You can pass the affinityId of a Node to indicate that this Task needs to run - * on that Compute Node. Note that this is just a soft affinity. If the target - * Compute Node is busy or unavailable at the time the Task is scheduled, then the - * Task will be scheduled elsewhere. + * The previous state of the Task. This property is not set if the Task is in its initial Active state. + * + * Possible values: active, preparing, running, completed */ - affinityId: string; + readonly previousState?: string; + /** The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. */ + readonly previousStateTransitionTime?: string; + /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + readonly commandLine?: string; + /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + readonly containerSettings?: TaskContainerSettingsOutput; + /** A list of files that the Batch service will download to the Compute Node before running the command line.
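The eTag surfaced above supports optimistic concurrency on Task updates. A hedged sketch of the round trip (client, jobId and taskId are assumed to be in scope; the RLC call shape is illustrative, not the exact generated surface):

const current = await client.path("/jobs/{jobId}/tasks/{taskId}", jobId, taskId).get();
await client.path("/jobs/{jobId}/tasks/{taskId}", jobId, taskId).put({
  // Apply the update only if nobody else has modified the Task since the read.
  headers: { "If-Match": current.headers["etag"] },
  body: { constraints: { maxTaskRetryCount: 3 } }
});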
For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + readonly resourceFiles?: Array; + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ + readonly outputFiles?: Array; + /** A list of environment variable settings for the Task. */ + readonly environmentSettings?: Array; + /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */ + readonly affinityInfo?: AffinityInformationOutput; + /** The execution constraints that apply to this Task. */ + constraints?: TaskConstraintsOutput; + /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */ + readonly requiredSlots?: number; + /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ + readonly userIdentity?: UserIdentityOutput; + /** Information about the execution of the Task. */ + readonly executionInfo?: TaskExecutionInformationOutput; + /** Information about the Compute Node on which the Task ran. */ + readonly nodeInfo?: BatchNodeInformationOutput; + /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */ + readonly multiInstanceSettings?: MultiInstanceSettingsOutput; + /** Resource usage statistics for the Task. */ + readonly stats?: TaskStatisticsOutput; + /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. */ + readonly dependsOn?: TaskDependenciesOutput; + /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */ + readonly applicationPackageReferences?: Array; + /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. 
For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ + readonly authenticationTokenSettings?: AuthenticationTokenSettingsOutput; } /** Information about the execution of a Task. */ export interface TaskExecutionInformationOutput { - /** - * 'Running' corresponds to the running state, so if the Task specifies resource - * files or Packages, then the start time reflects the time at which the Task - * started downloading or deploying these. If the Task has been restarted or - * retried, this is the most recent time at which the Task started running. This - * property is present only for Tasks that are in the running or completed state. - */ + /** The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state. */ startTime?: string; - /** This property is set only if the Task is in the Completed state. */ + /** The time at which the Task completed. This property is set only if the Task is in the Completed state. */ endTime?: string; - /** - * This property is set only if the Task is in the completed state. In general, - * the exit code for a process reflects the specific convention implemented by the - * application developer for that process. If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. However, if the Batch service terminates the Task (due - * to timeout, or user termination via the API) you may see an operating - * system-defined exit code. - */ + /** The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ exitCode?: number; - /** This property is set only if the Task runs in a container context. */ + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: TaskFailureInformationOutput; - /** - * Task application failures (non-zero exit code) are retried, pre-processing - * errors (the Task could not be run) and file upload errors are not retried. The - * Batch service will retry the Task up to the limit specified by the constraints. - */ + /** The number of times the Task has been retried by the Batch service. 
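When authenticationTokenSettings is set, code running inside the Task reads the token from the environment variable named above, e.g. in a Node.js Task script:

// Undefined unless authenticationTokenSettings was provided on the Task.
const batchToken = process.env["AZ_BATCH_AUTHENTICATION_TOKEN"];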
Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ retryCount: number; - /** - * This element is present only if the Task was retried (i.e. retryCount is - * nonzero). If present, this is typically the same as startTime, but may be - * different if the Task has been restarted for reasons other than retry; for - * example, if the Compute Node was rebooted during a retry, then the startTime is - * updated but the lastRetryTime is not. - */ + /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ lastRetryTime?: string; - /** - * When the user removes Compute Nodes from a Pool (by resizing/shrinking the - * pool) or when the Job is being disabled, the user can specify that running - * Tasks on the Compute Nodes be requeued for execution. This count tracks how - * many times the Task has been requeued for these reasons. - */ + /** The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons. */ requeueCount: number; - /** This property is set only if the requeueCount is nonzero. */ + /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ lastRequeueTime?: string; /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. * * Possible values: success, failure */ @@ -3265,11 +2026,8 @@ export interface TaskExecutionInformationOutput { } /** Information about the Compute Node on which a Task ran. */ -export interface ComputeNodeInformationOutput { - /** - * An identifier for the Node on which the Task ran, which can be passed when - * adding a Task to request that the Task be scheduled on this Compute Node. - */ +export interface BatchNodeInformationOutput { + /** An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. */ affinityId?: string; /** The URL of the Compute Node on which the Task ran. */ nodeUrl?: string; @@ -3283,62 +2041,19 @@ export interface ComputeNodeInformationOutput { taskRootDirectoryUrl?: string; } -/** - * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, - * if any of the subtasks fail (for example due to exiting with a non-zero exit - * code) the entire multi-instance Task fails. The multi-instance Task is then - * terminated and retried, up to its retry limit. - */ -export interface MultiInstanceSettingsOutput { - /** If omitted, the default is 1. 
*/ - numberOfInstances?: number; - /** - * A typical coordination command line launches a background service and verifies - * that the service is ready to process inter-node messages. - */ - coordinationCommandLine: string; - /** - * The difference between common resource files and Task resource files is that - * common resource files are downloaded for all subtasks including the primary, - * whereas Task resource files are downloaded only for the primary. Also note that - * these resource files are not downloaded to the Task working directory, but - * instead are downloaded to the Task root directory (one directory above the - * working directory). There is a maximum size for the list of resource files. - * When the max size is exceeded, the request will fail and the response error - * code will be RequestEntityTooLarge. If this occurs, the collection of - * ResourceFiles must be reduced in size. This can be achieved using .zip files, - * Application Packages, or Docker Containers. - */ - commonResourceFiles?: Array; -} - /** Resource usage statistics for a Task. */ export interface TaskStatisticsOutput { /** The URL of the statistics. */ url: string; /** The start time of the time range covered by the statistics. */ startTime: string; - /** - * The time at which the statistics were last updated. All statistics are limited - * to the range between startTime and lastUpdateTime. - */ + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ lastUpdateTime: string; - /** - * The total user mode CPU time (summed across all cores and all Compute Nodes) - * consumed by the Task. - */ + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */ userCPUTime: string; - /** - * The total kernel mode CPU time (summed across all cores and all Compute Nodes) - * consumed by the Task. - */ + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */ kernelCPUTime: string; - /** - * The wall clock time is the elapsed time from when the Task started running on a - * Compute Node to when it finished (or to the last time the statistics were - * updated, if the Task had not finished by then). If the Task was retried, this - * includes the wall clock time of all the Task retries. - */ + /** The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. */ wallClockTime: string; /** The total number of disk read operations made by the Task. */ readIOps: number; @@ -3348,52 +2063,14 @@ export interface TaskStatisticsOutput { readIOGiB: number; /** The total gibibytes written to disk by the Task. */ writeIOGiB: number; - /** - * The total wait time of the Task. The wait time for a Task is defined as the - * elapsed time between the creation of the Task and the start of Task execution. - * (If the Task is retried due to failures, the wait time is the time to the most - * recent Task execution.) - */ + /** The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.)
*/ waitTime: string; } -/** - * Specifies any dependencies of a Task. Any Task that is explicitly specified or - * within a dependency range must complete before the dependant Task will be - * scheduled. - */ -export interface TaskDependenciesOutput { - /** - * The taskIds collection is limited to 64000 characters total (i.e. the combined - * length of all Task IDs). If the taskIds collection exceeds the maximum length, - * the Add Task request fails with error code TaskDependencyListTooLong. In this - * case consider using Task ID ranges instead. - */ - taskIds?: string[]; - /** - * The list of Task ID ranges that this Task depends on. All Tasks in all ranges - * must complete successfully before the dependent Task can be scheduled. - */ - taskIdRanges?: Array; -} - -/** - * The start and end of the range are inclusive. For example, if a range has start - * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. - */ -export interface TaskIdRangeOutput { - /** The first Task ID in the range. */ - start: number; - /** The last Task ID in the range. */ - end: number; -} - -/** The result of listing the Tasks in a Job. */ -export interface BatchTaskListResultOutput { - /** The list of Tasks. */ - value?: Array; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; +/** A collection of Azure Batch Tasks to add. */ +export interface BatchTaskCollectionOutput { + /** The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. */ + value: Array; } /** The result of adding a collection of Tasks to a Job. */ @@ -3407,55 +2084,21 @@ export interface TaskAddResultOutput { /** * The status of the add Task request. * - * Possible values: success, clienterror, servererror + * Possible values: Success, clienterror, servererror */ status: string; /** The ID of the Task for which this is the result. */ taskId: string; - /** - * You can use this to detect whether the Task has changed between requests. In - * particular, you can be pass the ETag with an Update Task request to specify - * that your changes should take effect only if nobody else has modified the Job - * in the meantime. - */ + /** The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */ eTag?: string; /** The last modified time of the Task. */ lastModified?: string; /** The URL of the Task, if the Task was successfully added. */ location?: string; - /** An error response received from the Azure Batch service. */ + /** The error encountered while attempting to add the Task. */ error?: BatchErrorOutput; } -/** An error response received from the Azure Batch service. */ -export interface BatchErrorOutput { - /** - * An identifier for the error. Codes are invariant and are intended to be - * consumed programmatically. - */ - code?: string; - /** An error message received in an Azure Batch error response. */ - message?: ErrorMessageOutput; - /** A collection of key-value pairs containing additional details about the error.
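Because BatchTaskCollectionOutput caps a single add-collection request at 100 Tasks (and roughly 1MB serialized), callers with larger sets typically split them first; a minimal, hypothetical helper:

function chunkTasks<T>(tasks: T[], size = 100): T[][] {
  // Slice the input into consecutive groups of at most `size` Tasks,
  // one group per add-collection request.
  const chunks: T[][] = [];
  for (let i = 0; i < tasks.length; i += size) {
    chunks.push(tasks.slice(i, i + size));
  }
  return chunks;
}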
*/ - values?: Array; -} - -/** An error message received in an Azure Batch error response. */ -export interface ErrorMessageOutput { - /** The language code of the error message */ - lang?: string; - /** The text of the message. */ - value?: string; -} - -/** An item of additional information included in an Azure Batch error response. */ -export interface BatchErrorDetailOutput { - /** An identifier specifying the meaning of the Value property. */ - key?: string; - /** The additional information included with the error response. */ - value?: string; -} - /** The result of listing the subtasks of a Task. */ export interface BatchTaskListSubtasksResultOutput { /** The list of subtasks. */ @@ -3466,35 +2109,20 @@ export interface BatchTaskListSubtasksResultOutput { export interface SubtaskInformationOutput { /** The ID of the subtask. */ id?: number; - /** Information about the Compute Node on which a Task ran. */ - nodeInfo?: ComputeNodeInformationOutput; - /** - * The time at which the subtask started running. If the subtask has been - * restarted or retried, this is the most recent time at which the subtask started - * running. - */ + /** Information about the Compute Node on which the subtask ran. */ + nodeInfo?: BatchNodeInformationOutput; + /** The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running. */ startTime?: string; - /** This property is set only if the subtask is in the Completed state. */ + /** The time at which the subtask completed. This property is set only if the subtask is in the Completed state. */ endTime?: string; - /** - * This property is set only if the subtask is in the completed state. In general, - * the exit code for a process reflects the specific convention implemented by the - * application developer for that process. If you use the exit code value to make - * decisions in your code, be sure that you know the exit code convention used by - * the application process. However, if the Batch service terminates the subtask - * (due to timeout, or user termination via the API) you may see an operating - * system-defined exit code. - */ + /** The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ exitCode?: number; - /** This property is set only if the Task runs in a container context. */ + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: TaskFailureInformationOutput; /** - * The state of the subtask. + * The current state of the subtask. 
* * Possible values: preparing, running, completed */ @@ -3502,36 +2130,66 @@ export interface SubtaskInformationOutput { /** The time at which the subtask entered its current state. */ stateTransitionTime?: string; /** - * This property is not set if the subtask is in its initial running state. + * The previous state of the subtask. This property is not set if the subtask is in its initial running state. * * Possible values: preparing, running, completed */ previousState?: string; - /** This property is not set if the subtask is in its initial running state. */ + /** The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. */ previousStateTransitionTime?: string; /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. * * Possible values: success, failure */ result?: string; } +/** + * The result of listing the files on a Compute Node, or the files associated with + * a Task on a Compute Node. + */ +export interface NodeFileListResultOutput { + /** The list of files. */ + value?: Array; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +} + +/** Information about a file or directory on a Compute Node. */ +export interface NodeFileOutput { + /** The file path. */ + name?: string; + /** The URL of the file. */ + url?: string; + /** Whether the object represents a directory. */ + isDirectory?: boolean; + /** The file properties. */ + properties?: FilePropertiesOutput; +} + +/** The properties of a file on a Compute Node. */ +export interface FilePropertiesOutput { + /** The file creation time. The creation time is not returned for files on Linux Compute Nodes. */ + creationTime?: string; + /** The time at which the file was last modified. */ + lastModified: string; + /** The length of the file. */ + contentLength: number; + /** The content type of the file. */ + contentType?: string; + /** The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes. */ + fileMode?: string; +} + /** A Compute Node in the Batch service. */ -export interface ComputeNodeOutput { - /** - * Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a - * Compute Node is removed from a Pool, all of its local files are deleted, and - * the ID is reclaimed and could be reused for new Compute Nodes. - */ +export interface BatchNodeOutput { + /** The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. */ id?: string; /** The URL of the Compute Node. */ url?: string; /** - * The Spot/Low-priority Compute Node has been preempted. Tasks which were running - * on the Compute Node when it was preempted will be rescheduled when another - * Compute Node becomes available. + * The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. 
* * Possible values: idle, rebooting, reimaging, running, unusable, creating, starting, waitingforstarttask, starttaskfailed, unknown, leavingpool, offline, preempted */ @@ -3544,103 +2202,44 @@ export interface ComputeNodeOutput { schedulingState?: string; /** The time at which the Compute Node entered its current state. */ stateTransitionTime?: string; - /** This property may not be present if the Compute Node state is unusable. */ + /** The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. */ lastBootTime?: string; - /** - * This is the time when the Compute Node was initially allocated and doesn't - * change once set. It is not updated when the Compute Node is service healed or - * preempted. - */ + /** The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. */ allocationTime?: string; - /** - * Every Compute Node that is added to a Pool is assigned a unique IP address. - * Whenever a Compute Node is removed from a Pool, all of its local files are - * deleted, and the IP address is reclaimed and could be reused for new Compute - * Nodes. - */ + /** The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. */ ipAddress?: string; - /** - * Note that this is just a soft affinity. If the target Compute Node is busy or - * unavailable at the time the Task is scheduled, then the Task will be scheduled - * elsewhere. - */ + /** An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ affinityId?: string; - /** - * For information about available sizes of virtual machines in Pools, see Choose - * a VM size for Compute Nodes in an Azure Batch Pool - * (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - */ + /** The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ vmSize?: string; - /** - * The total number of Job Tasks completed on the Compute Node. This includes Job - * Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - * Tasks. - */ + /** The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ totalTasksRun?: number; - /** - * The total number of currently running Job Tasks on the Compute Node. This - * includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job - * Release or Start Tasks. - */ + /** The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ runningTasksCount?: number; - /** - * The total number of scheduling slots used by currently running Job Tasks on the - * Compute Node. 
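The affinityId above is only a soft placement hint. A sketch of requesting co-location with a previously observed Compute Node (node stands in for a BatchNodeOutput obtained elsewhere):

declare const node: { affinityId?: string };
const taskCreateOptions = {
  id: "co-located-task",
  commandLine: "/bin/sh -c 'echo co-located'",
  // Soft affinity only: Batch may schedule elsewhere if this Node is busy
  // or unavailable when the Task is scheduled.
  affinityInfo: node.affinityId ? { affinityId: node.affinityId } : undefined
};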
This includes Job Manager Tasks and normal Tasks, but not Job - * Preparation, Job Release or Start Tasks. - */ + /** The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ runningTaskSlotsCount?: number; - /** - * The total number of Job Tasks which completed successfully (with exitCode 0) on - * the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job - * Preparation, Job Release or Start Tasks. - */ + /** The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ totalTasksSucceeded?: number; - /** - * This property is present only if at least one Task has run on this Compute Node - * since it was assigned to the Pool. - */ + /** A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. */ recentTasks?: Array; - /** - * Batch will retry Tasks when a recovery operation is triggered on a Node. - * Examples of recovery operations include (but are not limited to) when an - * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - * Retries due to recovery operations are independent of and are not counted - * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - * retry due to a recovery operation may occur. Because of this, all Tasks should - * be idempotent. This means Tasks need to tolerate being interrupted and - * restarted without causing any corruption or duplicate data. The best practice - * for long running Tasks is to use some form of checkpointing. In some cases the - * StartTask may be re-run even though the Compute Node was not rebooted. Special - * care should be taken to avoid StartTasks which create breakaway process or - * install/launch services from the StartTask working directory, as this will - * block Batch from being able to re-run the StartTask. - */ + /** The Task specified to run on the Compute Node as it joins the Pool. */ startTask?: StartTaskOutput; - /** Information about a StartTask running on a Compute Node. */ + /** Runtime information about the execution of the StartTask on the Compute Node. */ startTaskInfo?: StartTaskInformationOutput; /** - * For Windows Nodes, the Batch service installs the Certificates to the specified - * Certificate store and location. For Linux Compute Nodes, the Certificates are - * stored in a directory inside the Task working directory and an environment - * variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this - * location. For Certificates with visibility of 'remoteUser', a 'certs' directory - * is created in the user's home directory (e.g., /home/{user-name}/certs) and - * Certificates are placed in that directory. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. 
+ * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ certificateReferences?: Array<CertificateReferenceOutput>; /** The list of errors that are currently being encountered by the Compute Node. */ - errors?: Array<ComputeNodeErrorOutput>; - /** - * Whether this Compute Node is a dedicated Compute Node. If false, the Compute - * Node is a Spot/Low-priority Compute Node. - */ + errors?: Array<BatchNodeErrorOutput>; + /** Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. */ isDedicated?: boolean; /** The endpoint configuration for the Compute Node. */ - endpointConfiguration?: ComputeNodeEndpointConfigurationOutput; - /** - * The Batch Compute Node agent is a program that runs on each Compute Node in the - * Pool and provides Batch capability on the Compute Node. - */ + endpointConfiguration?: BatchNodeEndpointConfigurationOutput; + /** Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. */ nodeAgentInfo?: NodeAgentInformationOutput; /** Info about the current state of the virtual machine. */ virtualMachineInfo?: VirtualMachineInfoOutput; @@ -3657,12 +2256,12 @@ export interface TaskInformationOutput { /** The ID of the subtask if the Task is a multi-instance Task. */ subtaskId?: number; /** - * The state of the Task. + * The current state of the Task. * * Possible values: active, preparing, running, completed */ taskState: string; - /** Information about the execution of a Task. */ + /** Information about the execution of the Task. */ executionInfo?: TaskExecutionInformationOutput; } @@ -3674,51 +2273,22 @@ export interface StartTaskInformationOutput { * Possible values: running, completed */ state: string; - /** - * This value is reset every time the Task is restarted or retried (that is, this - * is the most recent time at which the StartTask started running). - */ + /** The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). */ startTime: string; - /** - * This is the end time of the most recent run of the StartTask, if that run has - * completed (even if that run failed and a retry is pending). This element is not - * present if the StartTask is currently running. - */ + /** The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running. */ endTime?: string; - /** - * This property is set only if the StartTask is in the completed state. In - * general, the exit code for a process reflects the specific convention - * implemented by the application developer for that process. If you use the exit - * code value to make decisions in your code, be sure that you know the exit code - * convention used by the application process. However, if the Batch service - * terminates the StartTask (due to timeout, or user termination via the API) you - * may see an operating system-defined exit code. - */ + /** The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process.
If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ exitCode?: number; - /** This property is set only if the Task runs in a container context. */ + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ containerInfo?: TaskContainerExecutionInformationOutput; - /** - * This property is set only if the Task is in the completed state and encountered - * a failure. - */ + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: TaskFailureInformationOutput; - /** - * Task application failures (non-zero exit code) are retried, pre-processing - * errors (the Task could not be run) and file upload errors are not retried. The - * Batch service will retry the Task up to the limit specified by the constraints. - */ + /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ retryCount: number; - /** - * This element is present only if the Task was retried (i.e. retryCount is - * nonzero). If present, this is typically the same as startTime, but may be - * different if the Task has been restarted for reasons other than retry; for - * example, if the Compute Node was rebooted during a retry, then the startTime is - * updated but the lastRetryTime is not. - */ + /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ lastRetryTime?: string; /** - * If the value is 'failed', then the details of the failure can be found in the - * failureInfo property. + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. * * Possible values: success, failure */ @@ -3726,23 +2296,17 @@ export interface StartTaskInformationOutput { } /** An error encountered by a Compute Node. */ -export interface ComputeNodeErrorOutput { - /** - * An identifier for the Compute Node error. Codes are invariant and are intended - * to be consumed programmatically. - */ +export interface BatchNodeErrorOutput { + /** An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; - /** - * A message describing the Compute Node error, intended to be suitable for - * display in a user interface. - */ + /** A message describing the Compute Node error, intended to be suitable for display in a user interface. */ message?: string; /** The list of additional error details related to the Compute Node error. */ errorDetails?: Array<NameValuePairOutput>; } /** The endpoint configuration for the Compute Node.
*/ -export interface ComputeNodeEndpointConfigurationOutput { +export interface BatchNodeEndpointConfigurationOutput { /** The list of inbound endpoints that are accessible on the Compute Node. */ inboundEndpoints: Array<InboundEndpointOutput>; } @@ -3758,9 +2322,9 @@ export interface InboundEndpointOutput { */ protocol: string; /** The public IP address of the Compute Node. */ - publicIPAddress: string; + publicIPAddress?: string; /** The public fully qualified domain name for the Compute Node. */ - publicFQDN: string; + publicFQDN?: string; /** The public port number of the endpoint. */ frontendPort: number; /** The backend port number of the endpoint. */ @@ -3772,52 +2336,50 @@ export interface InboundEndpointOutput { * Pool and provides Batch capability on the Compute Node. */ export interface NodeAgentInformationOutput { - /** - * This version number can be checked against the Compute Node agent release notes - * located at - * https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - */ + /** The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. */ version: string; - /** - * This is the most recent time that the Compute Node agent was updated to a new - * version. - */ + /** The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version. */ lastUpdateTime: string; } /** Info about the current state of the virtual machine. */ export interface VirtualMachineInfoOutput { - /** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ + /** The reference to the Azure Virtual Machine's Marketplace Image. */ imageReference?: ImageReferenceOutput; } /** The remote login settings for a Compute Node. */ -export interface ComputeNodeGetRemoteLoginSettingsResultOutput { +export interface BatchNodeRemoteLoginSettingsResultOutput { /** The IP address used for remote login to the Compute Node. */ - readonly remoteLoginIPAddress: string; + remoteLoginIPAddress: string; /** The port used for remote login to the Compute Node. */ remoteLoginPort: number; } +/** The Azure Batch service log files upload options for a Compute Node. */ +export interface UploadBatchServiceLogsOptionsOutput { + /** The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. */ + containerUrl: string; + /** The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. */ + startTime: string; + /** The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded.
This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. */ + endTime?: string; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ + identityReference?: BatchNodeIdentityReferenceOutput; +} + /** The result of uploading Batch service log files from a specific Compute Node. */ export interface UploadBatchServiceLogsResultOutput { - /** - * The virtual directory name is part of the blob name for each log file uploaded, - * and it is built based poolId, nodeId and a unique identifier. - */ - readonly virtualDirectoryName: string; + /** The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based on poolId, nodeId and a unique identifier. */ + virtualDirectoryName: string; /** The number of log files which will be uploaded. */ numberOfFilesUploaded: number; } /** The result of listing the Compute Nodes in a Pool. */ -export interface ComputeNodeListResultOutput { +export interface BatchNodeListResultOutput { /** The list of Compute Nodes. */ - value?: Array<ComputeNodeOutput>; + value?: Array<BatchNodeOutput>; /** The URL to get the next set of results. */ "odata.nextLink"?: string; } @@ -3826,7 +2388,7 @@ export interface ComputeNodeListResultOutput { export interface NodeVMExtensionOutput { /** The provisioning state of the virtual machine extension. */ provisioningState?: string; - /** The configuration for virtual machine extensions. */ + /** The virtual machine extension. */ vmExtension?: VMExtensionOutput; /** The vm extension instance view. */ instanceView?: VMExtensionInstanceViewOutput; @@ -3867,6 +2429,3 @@ export interface NodeVMExtensionListOutput { /** The URL to get the next set of results.
*/ "odata.nextLink"?: string; } - -/** Paged collection of PoolUsageMetrics items */ -export type PoolUsageMetricsListOutput = Paged; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/paginateHelper.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/paginateHelper.ts index 1c9af35b1e..95baa6a409 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/paginateHelper.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/paginateHelper.ts @@ -66,8 +66,10 @@ export function paginate( // Extract element type from initial response type TElement = PaginateReturn; let firstRun = true; - const itemName = "value"; - const nextLinkName = "nextLink"; + // We need to check the response for success before trying to inspect it looking for + // the properties to use for nextLink and itemName + checkPagingRequest(initialResponse); + const { itemName, nextLinkName } = getPaginationProperties(initialResponse); const { customGetPage } = options; const pagedResult: PagedResult = { firstPageLink: "", @@ -152,3 +154,47 @@ function checkPagingRequest(response: PathUncheckedResponse): void { ); } } + +/** + * Extracts the itemName and nextLinkName from the initial response to use them for pagination + */ +function getPaginationProperties(initialResponse: PathUncheckedResponse) { + // Build a set with the passed custom nextLinkNames + const nextLinkNames = new Set(["nextLink", "odata.nextLink"]); + + // Build a set with the passed custom set of itemNames + const itemNames = new Set(["value"]); + + let nextLinkName: string | undefined; + let itemName: string | undefined; + + for (const name of nextLinkNames) { + const nextLink = (initialResponse.body as Record)[ + name + ] as string; + if (nextLink) { + nextLinkName = name; + break; + } + } + + for (const name of itemNames) { + const item = (initialResponse.body as Record)[ + name + ] as string; + if (item) { + itemName = name; + break; + } + } + + if (!itemName) { + throw new Error( + `Couldn't paginate response\n Body doesn't contain an array property with name: ${[ + ...itemNames, + ].join(" OR ")}` + ); + } + + return { itemName, nextLinkName }; +} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/parameters.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/parameters.ts index c8ca422b89..cabc390e6f 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/parameters.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/parameters.ts @@ -4,27 +4,34 @@ import { RawHttpHeadersInput } from "@azure/core-rest-pipeline"; import { RequestParameters } from "@azure-rest/core-client"; import { - BatchPool, - BatchPoolEnableAutoScaleParameters, - BatchPoolEvaluateAutoScaleParameters, - BatchPoolResizeParameters, - NodeRemoveParameters, + BatchPoolCreateOptions, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchPoolReplaceOptions, + NodeRemoveOptions, + BatchJobUpdateOptions, BatchJob, - BatchJobDisableParameters, - BatchJobTerminateParameters, - Certificate, + BatchJobDisableOptions, + BatchJobTerminateOptions, + BatchJobCreateOptions, + BatchCertificate, + BatchJobScheduleUpdateOptions, BatchJobSchedule, - BatchTask, + BatchJobScheduleCreateOptions, + BatchTaskCreateOptions, BatchTaskCollection, - ComputeNodeUser, - NodeUpdateUserParameters, - 
NodeRebootParameters, - NodeReimageParameters, - NodeDisableSchedulingParameters, - UploadBatchServiceLogsConfiguration, + BatchTask, + BatchNodeUserCreateOptions, + BatchNodeUserUpdateOptions, + NodeRebootOptions, + NodeReimageOptions, + NodeDisableSchedulingOptions, + UploadBatchServiceLogsOptions, } from "./models.js"; -export interface ApplicationsListApplicationsHeaders { +export interface ListApplicationsHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -40,7 +47,7 @@ export interface ApplicationsListApplicationsHeaders { "return-client-request-id"?: boolean; } -export interface ApplicationsListApplicationsQueryParamProperties { +export interface ListApplicationsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -53,22 +60,19 @@ export interface ApplicationsListApplicationsQueryParamProperties { timeOut?: number; } -export interface ApplicationsListApplicationsQueryParam { - queryParameters?: ApplicationsListApplicationsQueryParamProperties; +export interface ListApplicationsQueryParam { + queryParameters?: ListApplicationsQueryParamProperties; } -export interface ApplicationsListApplicationsHeaderParam { - headers?: RawHttpHeadersInput & ApplicationsListApplicationsHeaders; +export interface ListApplicationsHeaderParam { + headers?: RawHttpHeadersInput & ListApplicationsHeaders; } -export type ApplicationsListApplicationsParameters = - ApplicationsListApplicationsQueryParam & - ApplicationsListApplicationsHeaderParam & - RequestParameters; -export type ApplicationsGetParameters = RequestParameters; -export type PoolListUsageMetricsParameters = RequestParameters; +export type ListApplicationsParameters = ListApplicationsQueryParam & + ListApplicationsHeaderParam & + RequestParameters; -export interface PoolGetAllPoolLifetimeStatisticsHeaders { +export interface GetApplicationHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -84,7 +88,7 @@ export interface PoolGetAllPoolLifetimeStatisticsHeaders { "ocp-date"?: string; } -export interface PoolGetAllPoolLifetimeStatisticsQueryParamProperties { +export interface GetApplicationQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -92,20 +96,77 @@ export interface PoolGetAllPoolLifetimeStatisticsQueryParamProperties { timeOut?: number; } -export interface PoolGetAllPoolLifetimeStatisticsQueryParam { - queryParameters?: PoolGetAllPoolLifetimeStatisticsQueryParamProperties; +export interface GetApplicationQueryParam { + queryParameters?: GetApplicationQueryParamProperties; } -export interface PoolGetAllPoolLifetimeStatisticsHeaderParam { - headers?: RawHttpHeadersInput & PoolGetAllPoolLifetimeStatisticsHeaders; +export interface GetApplicationHeaderParam { + headers?: RawHttpHeadersInput & GetApplicationHeaders; } -export type PoolGetAllPoolLifetimeStatisticsParameters = - PoolGetAllPoolLifetimeStatisticsQueryParam & - PoolGetAllPoolLifetimeStatisticsHeaderParam & - RequestParameters; +export type GetApplicationParameters = GetApplicationQueryParam & + GetApplicationHeaderParam & + RequestParameters; + +export interface ListPoolUsageMetricsHeaders { + /** + * The time the request was issued. 
Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; +} + +export interface ListPoolUsageMetricsQueryParamProperties { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + timeOut?: number; + /** + * The earliest time from which to include metrics. This must be at least two and + * a half hours before the current time. If not specified this defaults to the + * start time of the last aggregation interval currently available. + */ + starttime?: Date | string; + /** + * The latest time from which to include metrics. This must be at least two hours + * before the current time. If not specified this defaults to the end time of the + * last aggregation interval currently available. + */ + endtime?: Date | string; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + */ + $filter?: string; +} + +export interface ListPoolUsageMetricsQueryParam { + queryParameters?: ListPoolUsageMetricsQueryParamProperties; +} + +export interface ListPoolUsageMetricsHeaderParam { + headers?: RawHttpHeadersInput & ListPoolUsageMetricsHeaders; +} + +export type ListPoolUsageMetricsParameters = ListPoolUsageMetricsQueryParam & + ListPoolUsageMetricsHeaderParam & + RequestParameters; -export interface PoolAddPoolHeaders { +export interface CreatePoolHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -121,12 +182,12 @@ export interface PoolAddPoolHeaders { "ocp-date"?: string; } -export interface PoolAddPoolBodyParam { - /** The Pool to be added. */ - body: BatchPool; +export interface CreatePoolBodyParam { + /** The Pool to be created. */ + body: BatchPoolCreateOptions; } -export interface PoolAddPoolQueryParamProperties { +export interface CreatePoolQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
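Note: the `getPaginationProperties` helper added to paginateHelper.ts above replaces the previously hard-coded `itemName`/`nextLinkName` pair with a probe of the first page's body, which is what lets Batch's OData-style `odata.nextLink` pages keep paginating. A minimal standalone sketch of that detection logic (the `detect` function and the sample bodies are illustrative, not part of the generated code):

```ts
// Illustrative sketch mirroring the detection added in paginateHelper.ts:
// probe the first page for a known items property and next-link property.
function detect(body: Record<string, unknown>) {
  const nextLinkNames = ["nextLink", "odata.nextLink"];
  const itemNames = ["value"];
  const nextLinkName = nextLinkNames.find((name) => Boolean(body[name]));
  const itemName = itemNames.find((name) => Boolean(body[name]));
  if (!itemName) {
    throw new Error(
      `Couldn't paginate response\n Body doesn't contain an array property with name: ${itemNames.join(" OR ")}`
    );
  }
  return { itemName, nextLinkName };
}

// Batch responses use the OData spelling, which the old hard-coded
// "nextLink" lookup would never find:
detect({ value: [], "odata.nextLink": "https://example.invalid/page2" });
// -> { itemName: "value", nextLinkName: "odata.nextLink" }

// ARM-style responses keep working unchanged:
detect({ value: [], nextLink: "https://example.invalid/page2" });
// -> { itemName: "value", nextLinkName: "nextLink" }
```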
@@ -134,20 +195,26 @@ export interface PoolAddPoolQueryParamProperties { timeOut?: number; } -export interface PoolAddPoolQueryParam { - queryParameters?: PoolAddPoolQueryParamProperties; +export interface CreatePoolQueryParam { + queryParameters?: CreatePoolQueryParamProperties; +} + +export interface CreatePoolHeaderParam { + headers?: RawHttpHeadersInput & CreatePoolHeaders; } -export interface PoolAddPoolHeaderParam { - headers?: RawHttpHeadersInput & PoolAddPoolHeaders; +export interface CreatePoolMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type PoolAddPoolParameters = PoolAddPoolQueryParam & - PoolAddPoolHeaderParam & - PoolAddPoolBodyParam & +export type CreatePoolParameters = CreatePoolQueryParam & + CreatePoolHeaderParam & + CreatePoolMediaTypesParam & + CreatePoolBodyParam & RequestParameters; -export interface PoolListPoolsHeaders { +export interface ListPoolsHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -163,7 +230,7 @@ export interface PoolListPoolsHeaders { "return-client-request-id"?: boolean; } -export interface PoolListPoolsQueryParamProperties { +export interface ListPoolsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -180,24 +247,24 @@ export interface PoolListPoolsQueryParamProperties { */ $filter?: string; /** An OData $select clause. */ - $select?: string; + $select?: string[]; /** An OData $expand clause. */ - $expand?: string; + $expand?: string[]; } -export interface PoolListPoolsQueryParam { - queryParameters?: PoolListPoolsQueryParamProperties; +export interface ListPoolsQueryParam { + queryParameters?: ListPoolsQueryParamProperties; } -export interface PoolListPoolsHeaderParam { - headers?: RawHttpHeadersInput & PoolListPoolsHeaders; +export interface ListPoolsHeaderParam { + headers?: RawHttpHeadersInput & ListPoolsHeaders; } -export type PoolListPoolsParameters = PoolListPoolsQueryParam & - PoolListPoolsHeaderParam & +export type ListPoolsParameters = ListPoolsQueryParam & + ListPoolsHeaderParam & RequestParameters; -export interface PoolDeletePoolHeaders { +export interface DeletePoolHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -237,7 +304,7 @@ export interface PoolDeletePoolHeaders { "if-unmodified-since"?: string; } -export interface PoolDeletePoolQueryParamProperties { +export interface DeletePoolQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
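Alongside the rename, each mutating operation gains an explicit `*MediaTypesParam` member, so the OData content type becomes part of the request type rather than being implied. A sketch of a literal that satisfies the `CreatePoolParameters` intersection above; the `id`/`vmSize` body fields are placeholder values, not taken from the generated tests:

```ts
// Shape follows the CreatePoolParameters intersection; values are placeholders.
const createPoolRequest = {
  // Required by CreatePoolMediaTypesParam:
  contentType: "application/json; odata=minimalmetadata",
  // Required by CreatePoolBodyParam (a BatchPoolCreateOptions payload):
  body: {
    id: "example-pool",
    vmSize: "STANDARD_D2s_v3",
  },
  // Optional query and header members:
  queryParameters: { timeOut: 30 },
  headers: { "client-request-id": "9C4D50EE-2D56-4CD3-8152-34347DC9F2B0" },
} as const;
```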
@@ -245,16 +312,16 @@ export interface PoolDeletePoolQueryParamProperties { timeOut?: number; } -export interface PoolDeletePoolQueryParam { - queryParameters?: PoolDeletePoolQueryParamProperties; +export interface DeletePoolQueryParam { + queryParameters?: DeletePoolQueryParamProperties; } -export interface PoolDeletePoolHeaderParam { - headers?: RawHttpHeadersInput & PoolDeletePoolHeaders; +export interface DeletePoolHeaderParam { + headers?: RawHttpHeadersInput & DeletePoolHeaders; } -export type PoolDeletePoolParameters = PoolDeletePoolQueryParam & - PoolDeletePoolHeaderParam & +export type DeletePoolParameters = DeletePoolQueryParam & + DeletePoolHeaderParam & RequestParameters; export interface PoolExistsHeaders { @@ -317,7 +384,7 @@ export type PoolExistsParameters = PoolExistsQueryParam & PoolExistsHeaderParam & RequestParameters; -export interface PoolGetPoolHeaders { +export interface GetPoolHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -357,31 +424,31 @@ export interface PoolGetPoolHeaders { "if-unmodified-since"?: string; } -export interface PoolGetPoolQueryParamProperties { +export interface GetPoolQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; /** An OData $select clause. */ - $select?: string; + $select?: string[]; /** An OData $expand clause. */ - $expand?: string; + $expand?: string[]; } -export interface PoolGetPoolQueryParam { - queryParameters?: PoolGetPoolQueryParamProperties; +export interface GetPoolQueryParam { + queryParameters?: GetPoolQueryParamProperties; } -export interface PoolGetPoolHeaderParam { - headers?: RawHttpHeadersInput & PoolGetPoolHeaders; +export interface GetPoolHeaderParam { + headers?: RawHttpHeadersInput & GetPoolHeaders; } -export type PoolGetPoolParameters = PoolGetPoolQueryParam & - PoolGetPoolHeaderParam & +export type GetPoolParameters = GetPoolQueryParam & + GetPoolHeaderParam & RequestParameters; -export interface PoolPatchPoolHeaders { +export interface UpdatePoolHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -421,12 +488,12 @@ export interface PoolPatchPoolHeaders { "if-unmodified-since"?: string; } -export interface PoolPatchPoolBodyParam { - /** The parameters for the request. */ - body: BatchPool; +export interface UpdatePoolBodyParam { + /** The pool properties to update. */ + body: BatchPoolUpdateOptions; } -export interface PoolPatchPoolQueryParamProperties { +export interface UpdatePoolQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
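Note also the `$select`/`$expand` typing change in the hunks above: OData projection clauses are now `string[]` rather than a pre-joined string, leaving the joining to the request serializer (presumably the usual comma-separated collection format, matching the strings callers used to build by hand). An illustrative before/after:

```ts
// New shape: pass the clause parts as an array (illustrative values).
const getPoolOptions = {
  queryParameters: {
    timeOut: 30,
    $select: ["id", "state", "allocationState"],
    $expand: ["stats"],
  },
};

// Old shape, for comparison -- no longer matches GetPoolQueryParamProperties:
// const legacy = { queryParameters: { $select: "id,state,allocationState" } };
```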
@@ -434,20 +501,26 @@ export interface PoolPatchPoolQueryParamProperties { timeOut?: number; } -export interface PoolPatchPoolQueryParam { - queryParameters?: PoolPatchPoolQueryParamProperties; +export interface UpdatePoolQueryParam { + queryParameters?: UpdatePoolQueryParamProperties; +} + +export interface UpdatePoolHeaderParam { + headers?: RawHttpHeadersInput & UpdatePoolHeaders; } -export interface PoolPatchPoolHeaderParam { - headers?: RawHttpHeadersInput & PoolPatchPoolHeaders; +export interface UpdatePoolMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type PoolPatchPoolParameters = PoolPatchPoolQueryParam & - PoolPatchPoolHeaderParam & - PoolPatchPoolBodyParam & +export type UpdatePoolParameters = UpdatePoolQueryParam & + UpdatePoolHeaderParam & + UpdatePoolMediaTypesParam & + UpdatePoolBodyParam & RequestParameters; -export interface PoolDisableAutoScaleHeaders { +export interface DisablePoolAutoScaleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -463,7 +536,7 @@ export interface PoolDisableAutoScaleHeaders { "ocp-date"?: string; } -export interface PoolDisableAutoScaleQueryParamProperties { +export interface DisablePoolAutoScaleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -471,19 +544,19 @@ export interface PoolDisableAutoScaleQueryParamProperties { timeOut?: number; } -export interface PoolDisableAutoScaleQueryParam { - queryParameters?: PoolDisableAutoScaleQueryParamProperties; +export interface DisablePoolAutoScaleQueryParam { + queryParameters?: DisablePoolAutoScaleQueryParamProperties; } -export interface PoolDisableAutoScaleHeaderParam { - headers?: RawHttpHeadersInput & PoolDisableAutoScaleHeaders; +export interface DisablePoolAutoScaleHeaderParam { + headers?: RawHttpHeadersInput & DisablePoolAutoScaleHeaders; } -export type PoolDisableAutoScaleParameters = PoolDisableAutoScaleQueryParam & - PoolDisableAutoScaleHeaderParam & +export type DisablePoolAutoScaleParameters = DisablePoolAutoScaleQueryParam & + DisablePoolAutoScaleHeaderParam & RequestParameters; -export interface PoolEnableAutoScaleHeaders { +export interface EnablePoolAutoScaleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -523,12 +596,12 @@ export interface PoolEnableAutoScaleHeaders { "if-unmodified-since"?: string; } -export interface PoolEnableAutoScaleBodyParam { - /** The parameters for the request. */ - body: BatchPoolEnableAutoScaleParameters; +export interface EnablePoolAutoScaleBodyParam { + /** The options to use for enabling automatic scaling. */ + body: BatchPoolEnableAutoScaleOptions; } -export interface PoolEnableAutoScaleQueryParamProperties { +export interface EnablePoolAutoScaleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
@@ -536,20 +609,26 @@ export interface PoolEnableAutoScaleQueryParamProperties { timeOut?: number; } -export interface PoolEnableAutoScaleQueryParam { - queryParameters?: PoolEnableAutoScaleQueryParamProperties; +export interface EnablePoolAutoScaleQueryParam { + queryParameters?: EnablePoolAutoScaleQueryParamProperties; +} + +export interface EnablePoolAutoScaleHeaderParam { + headers?: RawHttpHeadersInput & EnablePoolAutoScaleHeaders; } -export interface PoolEnableAutoScaleHeaderParam { - headers?: RawHttpHeadersInput & PoolEnableAutoScaleHeaders; +export interface EnablePoolAutoScaleMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type PoolEnableAutoScaleParameters = PoolEnableAutoScaleQueryParam & - PoolEnableAutoScaleHeaderParam & - PoolEnableAutoScaleBodyParam & +export type EnablePoolAutoScaleParameters = EnablePoolAutoScaleQueryParam & + EnablePoolAutoScaleHeaderParam & + EnablePoolAutoScaleMediaTypesParam & + EnablePoolAutoScaleBodyParam & RequestParameters; -export interface PoolEvaluateAutoScaleHeaders { +export interface EvaluatePoolAutoScaleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -565,12 +644,12 @@ export interface PoolEvaluateAutoScaleHeaders { "ocp-date"?: string; } -export interface PoolEvaluateAutoScaleBodyParam { - /** The parameters for the request. */ - body: BatchPoolEvaluateAutoScaleParameters; +export interface EvaluatePoolAutoScaleBodyParam { + /** The options to use for evaluating the automatic scaling formula. */ + body: BatchPoolEvaluateAutoScaleOptions; } -export interface PoolEvaluateAutoScaleQueryParamProperties { +export interface EvaluatePoolAutoScaleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -578,20 +657,26 @@ export interface PoolEvaluateAutoScaleQueryParamProperties { timeOut?: number; } -export interface PoolEvaluateAutoScaleQueryParam { - queryParameters?: PoolEvaluateAutoScaleQueryParamProperties; +export interface EvaluatePoolAutoScaleQueryParam { + queryParameters?: EvaluatePoolAutoScaleQueryParamProperties; +} + +export interface EvaluatePoolAutoScaleHeaderParam { + headers?: RawHttpHeadersInput & EvaluatePoolAutoScaleHeaders; } -export interface PoolEvaluateAutoScaleHeaderParam { - headers?: RawHttpHeadersInput & PoolEvaluateAutoScaleHeaders; +export interface EvaluatePoolAutoScaleMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type PoolEvaluateAutoScaleParameters = PoolEvaluateAutoScaleQueryParam & - PoolEvaluateAutoScaleHeaderParam & - PoolEvaluateAutoScaleBodyParam & +export type EvaluatePoolAutoScaleParameters = EvaluatePoolAutoScaleQueryParam & + EvaluatePoolAutoScaleHeaderParam & + EvaluatePoolAutoScaleMediaTypesParam & + EvaluatePoolAutoScaleBodyParam & RequestParameters; -export interface PoolResizeHeaders { +export interface ResizePoolHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -631,12 +716,12 @@ export interface PoolResizeHeaders { "if-unmodified-since"?: string; } -export interface PoolResizeBodyParam { - /** The parameters for the request. */ - body: BatchPoolResizeParameters; +export interface ResizePoolBodyParam { + /** The options to use for resizing the pool. 
*/ + body: BatchPoolResizeOptions; } -export interface PoolResizeQueryParamProperties { +export interface ResizePoolQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -644,20 +729,26 @@ export interface PoolResizeQueryParamProperties { timeOut?: number; } -export interface PoolResizeQueryParam { - queryParameters?: PoolResizeQueryParamProperties; +export interface ResizePoolQueryParam { + queryParameters?: ResizePoolQueryParamProperties; +} + +export interface ResizePoolHeaderParam { + headers?: RawHttpHeadersInput & ResizePoolHeaders; } -export interface PoolResizeHeaderParam { - headers?: RawHttpHeadersInput & PoolResizeHeaders; +export interface ResizePoolMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type PoolResizeParameters = PoolResizeQueryParam & - PoolResizeHeaderParam & - PoolResizeBodyParam & +export type ResizePoolParameters = ResizePoolQueryParam & + ResizePoolHeaderParam & + ResizePoolMediaTypesParam & + ResizePoolBodyParam & RequestParameters; -export interface PoolStopResizeHeaders { +export interface StopPoolResizeHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -697,7 +788,7 @@ export interface PoolStopResizeHeaders { "if-unmodified-since"?: string; } -export interface PoolStopResizeQueryParamProperties { +export interface StopPoolResizeQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -705,19 +796,19 @@ export interface PoolStopResizeQueryParamProperties { timeOut?: number; } -export interface PoolStopResizeQueryParam { - queryParameters?: PoolStopResizeQueryParamProperties; +export interface StopPoolResizeQueryParam { + queryParameters?: StopPoolResizeQueryParamProperties; } -export interface PoolStopResizeHeaderParam { - headers?: RawHttpHeadersInput & PoolStopResizeHeaders; +export interface StopPoolResizeHeaderParam { + headers?: RawHttpHeadersInput & StopPoolResizeHeaders; } -export type PoolStopResizeParameters = PoolStopResizeQueryParam & - PoolStopResizeHeaderParam & +export type StopPoolResizeParameters = StopPoolResizeQueryParam & + StopPoolResizeHeaderParam & RequestParameters; -export interface PoolUpdatePropertiesHeaders { +export interface ReplacePoolPropertiesHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -733,12 +824,12 @@ export interface PoolUpdatePropertiesHeaders { "ocp-date"?: string; } -export interface PoolUpdatePropertiesBodyParam { - /** The parameters for the request. */ - body: BatchPool; +export interface ReplacePoolPropertiesBodyParam { + /** The options to use for replacing properties on the pool. */ + body: BatchPoolReplaceOptions; } -export interface PoolUpdatePropertiesQueryParamProperties { +export interface ReplacePoolPropertiesQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
@@ -746,20 +837,26 @@ export interface PoolUpdatePropertiesQueryParamProperties { timeOut?: number; } -export interface PoolUpdatePropertiesQueryParam { - queryParameters?: PoolUpdatePropertiesQueryParamProperties; +export interface ReplacePoolPropertiesQueryParam { + queryParameters?: ReplacePoolPropertiesQueryParamProperties; +} + +export interface ReplacePoolPropertiesHeaderParam { + headers?: RawHttpHeadersInput & ReplacePoolPropertiesHeaders; } -export interface PoolUpdatePropertiesHeaderParam { - headers?: RawHttpHeadersInput & PoolUpdatePropertiesHeaders; +export interface ReplacePoolPropertiesMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type PoolUpdatePropertiesParameters = PoolUpdatePropertiesQueryParam & - PoolUpdatePropertiesHeaderParam & - PoolUpdatePropertiesBodyParam & +export type ReplacePoolPropertiesParameters = ReplacePoolPropertiesQueryParam & + ReplacePoolPropertiesHeaderParam & + ReplacePoolPropertiesMediaTypesParam & + ReplacePoolPropertiesBodyParam & RequestParameters; -export interface PoolRemoveNodesHeaders { +export interface RemoveNodesHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -799,12 +896,12 @@ export interface PoolRemoveNodesHeaders { "if-unmodified-since"?: string; } -export interface PoolRemoveNodesBodyParam { - /** The parameters for the request. */ - body: NodeRemoveParameters; +export interface RemoveNodesBodyParam { + /** The options to use for removing the node. */ + body: NodeRemoveOptions; } -export interface PoolRemoveNodesQueryParamProperties { +export interface RemoveNodesQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -812,20 +909,26 @@ export interface PoolRemoveNodesQueryParamProperties { timeOut?: number; } -export interface PoolRemoveNodesQueryParam { - queryParameters?: PoolRemoveNodesQueryParamProperties; +export interface RemoveNodesQueryParam { + queryParameters?: RemoveNodesQueryParamProperties; +} + +export interface RemoveNodesHeaderParam { + headers?: RawHttpHeadersInput & RemoveNodesHeaders; } -export interface PoolRemoveNodesHeaderParam { - headers?: RawHttpHeadersInput & PoolRemoveNodesHeaders; +export interface RemoveNodesMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type PoolRemoveNodesParameters = PoolRemoveNodesQueryParam & - PoolRemoveNodesHeaderParam & - PoolRemoveNodesBodyParam & +export type RemoveNodesParameters = RemoveNodesQueryParam & + RemoveNodesHeaderParam & + RemoveNodesMediaTypesParam & + RemoveNodesBodyParam & RequestParameters; -export interface AccountListSupportedImagesHeaders { +export interface ListSupportedImagesHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -841,7 +944,7 @@ export interface AccountListSupportedImagesHeaders { "return-client-request-id"?: boolean; } -export interface AccountListSupportedImagesQueryParamProperties { +export interface ListSupportedImagesQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
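The `PoolPatchPool* -> UpdatePool*` and `PoolUpdateProperties* -> ReplacePoolProperties*` renames above align the names with the HTTP semantics: update is a partial PATCH, while replace resets anything not supplied. A rough sketch of the two body shapes; the field names follow the Batch REST API's patch/updateproperties operations and should be treated as illustrative here:

```ts
// Partial update (BatchPoolUpdateOptions): only supplied properties change.
const updatePoolBody = {
  metadata: [{ name: "owner", value: "team-a" }],
};

// Full replacement (BatchPoolReplaceOptions): the replaceable collections are
// required, and an empty array actively clears the corresponding setting.
const replacePoolPropertiesBody = {
  certificateReferences: [],
  applicationPackageReferences: [],
  metadata: [{ name: "owner", value: "team-a" }],
};
```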
@@ -859,20 +962,19 @@ export interface AccountListSupportedImagesQueryParamProperties { $filter?: string; } -export interface AccountListSupportedImagesQueryParam { - queryParameters?: AccountListSupportedImagesQueryParamProperties; +export interface ListSupportedImagesQueryParam { + queryParameters?: ListSupportedImagesQueryParamProperties; } -export interface AccountListSupportedImagesHeaderParam { - headers?: RawHttpHeadersInput & AccountListSupportedImagesHeaders; +export interface ListSupportedImagesHeaderParam { + headers?: RawHttpHeadersInput & ListSupportedImagesHeaders; } -export type AccountListSupportedImagesParameters = - AccountListSupportedImagesQueryParam & - AccountListSupportedImagesHeaderParam & - RequestParameters; +export type ListSupportedImagesParameters = ListSupportedImagesQueryParam & + ListSupportedImagesHeaderParam & + RequestParameters; -export interface AccountListPoolNodeCountsHeaders { +export interface ListPoolNodeCountsHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -888,7 +990,7 @@ export interface AccountListPoolNodeCountsHeaders { "return-client-request-id"?: boolean; } -export interface AccountListPoolNodeCountsQueryParamProperties { +export interface ListPoolNodeCountsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -906,57 +1008,19 @@ export interface AccountListPoolNodeCountsQueryParamProperties { $filter?: string; } -export interface AccountListPoolNodeCountsQueryParam { - queryParameters?: AccountListPoolNodeCountsQueryParamProperties; -} - -export interface AccountListPoolNodeCountsHeaderParam { - headers?: RawHttpHeadersInput & AccountListPoolNodeCountsHeaders; -} - -export type AccountListPoolNodeCountsParameters = - AccountListPoolNodeCountsQueryParam & - AccountListPoolNodeCountsHeaderParam & - RequestParameters; - -export interface JobGetAllJobLifetimeStatisticsHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; -} - -export interface JobGetAllJobLifetimeStatisticsQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. 
- */ - timeOut?: number; -} - -export interface JobGetAllJobLifetimeStatisticsQueryParam { - queryParameters?: JobGetAllJobLifetimeStatisticsQueryParamProperties; +export interface ListPoolNodeCountsQueryParam { + queryParameters?: ListPoolNodeCountsQueryParamProperties; } -export interface JobGetAllJobLifetimeStatisticsHeaderParam { - headers?: RawHttpHeadersInput & JobGetAllJobLifetimeStatisticsHeaders; +export interface ListPoolNodeCountsHeaderParam { + headers?: RawHttpHeadersInput & ListPoolNodeCountsHeaders; } -export type JobGetAllJobLifetimeStatisticsParameters = - JobGetAllJobLifetimeStatisticsQueryParam & - JobGetAllJobLifetimeStatisticsHeaderParam & - RequestParameters; +export type ListPoolNodeCountsParameters = ListPoolNodeCountsQueryParam & + ListPoolNodeCountsHeaderParam & + RequestParameters; -export interface JobDeleteJobHeaders { +export interface DeleteJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -996,7 +1060,7 @@ export interface JobDeleteJobHeaders { "if-unmodified-since"?: string; } -export interface JobDeleteJobQueryParamProperties { +export interface DeleteJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1004,19 +1068,19 @@ export interface JobDeleteJobQueryParamProperties { timeOut?: number; } -export interface JobDeleteJobQueryParam { - queryParameters?: JobDeleteJobQueryParamProperties; +export interface DeleteJobQueryParam { + queryParameters?: DeleteJobQueryParamProperties; } -export interface JobDeleteJobHeaderParam { - headers?: RawHttpHeadersInput & JobDeleteJobHeaders; +export interface DeleteJobHeaderParam { + headers?: RawHttpHeadersInput & DeleteJobHeaders; } -export type JobDeleteJobParameters = JobDeleteJobQueryParam & - JobDeleteJobHeaderParam & +export type DeleteJobParameters = DeleteJobQueryParam & + DeleteJobHeaderParam & RequestParameters; -export interface JobGetJobHeaders { +export interface GetJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1056,31 +1120,31 @@ export interface JobGetJobHeaders { "if-unmodified-since"?: string; } -export interface JobGetJobQueryParamProperties { +export interface GetJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; /** An OData $select clause. */ - $select?: string; + $select?: string[]; /** An OData $expand clause. */ - $expand?: string; + $expand?: string[]; } -export interface JobGetJobQueryParam { - queryParameters?: JobGetJobQueryParamProperties; +export interface GetJobQueryParam { + queryParameters?: GetJobQueryParamProperties; } -export interface JobGetJobHeaderParam { - headers?: RawHttpHeadersInput & JobGetJobHeaders; +export interface GetJobHeaderParam { + headers?: RawHttpHeadersInput & GetJobHeaders; } -export type JobGetJobParameters = JobGetJobQueryParam & - JobGetJobHeaderParam & +export type GetJobParameters = GetJobQueryParam & + GetJobHeaderParam & RequestParameters; -export interface JobPatchJobHeaders { +export interface UpdateJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
@@ -1120,12 +1184,12 @@ export interface JobPatchJobHeaders { "if-unmodified-since"?: string; } -export interface JobPatchJobBodyParam { - /** The parameters for the request. */ - body: BatchJob; +export interface UpdateJobBodyParam { + /** The options to use for updating the Job. */ + body: BatchJobUpdateOptions; } -export interface JobPatchJobQueryParamProperties { +export interface UpdateJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1133,20 +1197,26 @@ export interface JobPatchJobQueryParamProperties { timeOut?: number; } -export interface JobPatchJobQueryParam { - queryParameters?: JobPatchJobQueryParamProperties; +export interface UpdateJobQueryParam { + queryParameters?: UpdateJobQueryParamProperties; +} + +export interface UpdateJobHeaderParam { + headers?: RawHttpHeadersInput & UpdateJobHeaders; } -export interface JobPatchJobHeaderParam { - headers?: RawHttpHeadersInput & JobPatchJobHeaders; +export interface UpdateJobMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type JobPatchJobParameters = JobPatchJobQueryParam & - JobPatchJobHeaderParam & - JobPatchJobBodyParam & +export type UpdateJobParameters = UpdateJobQueryParam & + UpdateJobHeaderParam & + UpdateJobMediaTypesParam & + UpdateJobBodyParam & RequestParameters; -export interface JobUpdateJobHeaders { +export interface ReplaceJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1186,12 +1256,12 @@ export interface JobUpdateJobHeaders { "if-unmodified-since"?: string; } -export interface JobUpdateJobBodyParam { - /** The parameters for the request. */ +export interface ReplaceJobBodyParam { + /** A job with updated properties */ body: BatchJob; } -export interface JobUpdateJobQueryParamProperties { +export interface ReplaceJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1199,20 +1269,26 @@ export interface JobUpdateJobQueryParamProperties { timeOut?: number; } -export interface JobUpdateJobQueryParam { - queryParameters?: JobUpdateJobQueryParamProperties; +export interface ReplaceJobQueryParam { + queryParameters?: ReplaceJobQueryParamProperties; } -export interface JobUpdateJobHeaderParam { - headers?: RawHttpHeadersInput & JobUpdateJobHeaders; +export interface ReplaceJobHeaderParam { + headers?: RawHttpHeadersInput & ReplaceJobHeaders; } -export type JobUpdateJobParameters = JobUpdateJobQueryParam & - JobUpdateJobHeaderParam & - JobUpdateJobBodyParam & +export interface ReplaceJobMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type ReplaceJobParameters = ReplaceJobQueryParam & + ReplaceJobHeaderParam & + ReplaceJobMediaTypesParam & + ReplaceJobBodyParam & RequestParameters; -export interface JobDisableJobHeaders { +export interface DisableJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1252,12 +1328,12 @@ export interface JobDisableJobHeaders { "if-unmodified-since"?: string; } -export interface JobDisableJobBodyParam { - /** The parameters for the request. 
*/ - body: BatchJobDisableParameters; +export interface DisableJobBodyParam { + /** The options to use for disabling the Job. */ + body: BatchJobDisableOptions; } -export interface JobDisableJobQueryParamProperties { +export interface DisableJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1265,20 +1341,26 @@ export interface JobDisableJobQueryParamProperties { timeOut?: number; } -export interface JobDisableJobQueryParam { - queryParameters?: JobDisableJobQueryParamProperties; +export interface DisableJobQueryParam { + queryParameters?: DisableJobQueryParamProperties; +} + +export interface DisableJobHeaderParam { + headers?: RawHttpHeadersInput & DisableJobHeaders; } -export interface JobDisableJobHeaderParam { - headers?: RawHttpHeadersInput & JobDisableJobHeaders; +export interface DisableJobMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type JobDisableJobParameters = JobDisableJobQueryParam & - JobDisableJobHeaderParam & - JobDisableJobBodyParam & +export type DisableJobParameters = DisableJobQueryParam & + DisableJobHeaderParam & + DisableJobMediaTypesParam & + DisableJobBodyParam & RequestParameters; -export interface JobEnableJobHeaders { +export interface EnableJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1318,7 +1400,7 @@ export interface JobEnableJobHeaders { "if-unmodified-since"?: string; } -export interface JobEnableJobQueryParamProperties { +export interface EnableJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1326,19 +1408,19 @@ export interface JobEnableJobQueryParamProperties { timeOut?: number; } -export interface JobEnableJobQueryParam { - queryParameters?: JobEnableJobQueryParamProperties; +export interface EnableJobQueryParam { + queryParameters?: EnableJobQueryParamProperties; } -export interface JobEnableJobHeaderParam { - headers?: RawHttpHeadersInput & JobEnableJobHeaders; +export interface EnableJobHeaderParam { + headers?: RawHttpHeadersInput & EnableJobHeaders; } -export type JobEnableJobParameters = JobEnableJobQueryParam & - JobEnableJobHeaderParam & +export type EnableJobParameters = EnableJobQueryParam & + EnableJobHeaderParam & RequestParameters; -export interface JobTerminateJobHeaders { +export interface TerminateJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1378,12 +1460,12 @@ export interface JobTerminateJobHeaders { "if-unmodified-since"?: string; } -export interface JobTerminateJobBodyParam { - /** The parameters for the request. */ - body?: BatchJobTerminateParameters; +export interface TerminateJobBodyParam { + /** The options to use for terminating the Job. */ + body?: BatchJobTerminateOptions; } -export interface JobTerminateJobQueryParamProperties { +export interface TerminateJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
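One wrinkle worth noting from the hunks above: `TerminateJobBodyParam` keeps its body optional (`body?: BatchJobTerminateOptions`), yet the hunk that follows still adds a required `TerminateJobMediaTypesParam`, so callers declare the OData content type even when they send no payload. A sketch of both call shapes; `terminateReason` is the Batch REST API's field name for this body and is illustrative here:

```ts
// With a body: record why the job was terminated.
const terminateWithReason = {
  contentType: "application/json; odata=minimalmetadata",
  body: { terminateReason: "user cancelled" },
} as const;

// Without a body: contentType is still required by the intersection type.
const terminateBare = {
  contentType: "application/json; odata=minimalmetadata",
} as const;
```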
@@ -1391,20 +1473,26 @@ export interface JobTerminateJobQueryParamProperties { timeOut?: number; } -export interface JobTerminateJobQueryParam { - queryParameters?: JobTerminateJobQueryParamProperties; +export interface TerminateJobQueryParam { + queryParameters?: TerminateJobQueryParamProperties; } -export interface JobTerminateJobHeaderParam { - headers?: RawHttpHeadersInput & JobTerminateJobHeaders; +export interface TerminateJobHeaderParam { + headers?: RawHttpHeadersInput & TerminateJobHeaders; } -export type JobTerminateJobParameters = JobTerminateJobQueryParam & - JobTerminateJobHeaderParam & - JobTerminateJobBodyParam & +export interface TerminateJobMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type TerminateJobParameters = TerminateJobQueryParam & + TerminateJobHeaderParam & + TerminateJobMediaTypesParam & + TerminateJobBodyParam & RequestParameters; -export interface JobAddJobHeaders { +export interface CreateJobHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1420,12 +1508,12 @@ export interface JobAddJobHeaders { "ocp-date"?: string; } -export interface JobAddJobBodyParam { - /** The Job to be added. */ - body: BatchJob; +export interface CreateJobBodyParam { + /** The Job to be created. */ + body: BatchJobCreateOptions; } -export interface JobAddJobQueryParamProperties { +export interface CreateJobQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1433,20 +1521,26 @@ export interface JobAddJobQueryParamProperties { timeOut?: number; } -export interface JobAddJobQueryParam { - queryParameters?: JobAddJobQueryParamProperties; +export interface CreateJobQueryParam { + queryParameters?: CreateJobQueryParamProperties; +} + +export interface CreateJobHeaderParam { + headers?: RawHttpHeadersInput & CreateJobHeaders; } -export interface JobAddJobHeaderParam { - headers?: RawHttpHeadersInput & JobAddJobHeaders; +export interface CreateJobMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type JobAddJobParameters = JobAddJobQueryParam & - JobAddJobHeaderParam & - JobAddJobBodyParam & +export type CreateJobParameters = CreateJobQueryParam & + CreateJobHeaderParam & + CreateJobMediaTypesParam & + CreateJobBodyParam & RequestParameters; -export interface JobListJobsHeaders { +export interface ListJobsHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -1462,7 +1556,7 @@ export interface JobListJobsHeaders { "return-client-request-id"?: boolean; } -export interface JobListJobsQueryParamProperties { +export interface ListJobsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1479,24 +1573,24 @@ export interface JobListJobsQueryParamProperties { */ $filter?: string; /** An OData $select clause. */ - $select?: string; + $select?: string[]; /** An OData $expand clause. 
*/ - $expand?: string; + $expand?: string[]; } -export interface JobListJobsQueryParam { - queryParameters?: JobListJobsQueryParamProperties; +export interface ListJobsQueryParam { + queryParameters?: ListJobsQueryParamProperties; } -export interface JobListJobsHeaderParam { - headers?: RawHttpHeadersInput & JobListJobsHeaders; +export interface ListJobsHeaderParam { + headers?: RawHttpHeadersInput & ListJobsHeaders; } -export type JobListJobsParameters = JobListJobsQueryParam & - JobListJobsHeaderParam & +export type ListJobsParameters = ListJobsQueryParam & + ListJobsHeaderParam & RequestParameters; -export interface JobListFromJobScheduleHeaders { +export interface ListJobsFromScheduleHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -1512,7 +1606,7 @@ export interface JobListFromJobScheduleHeaders { "return-client-request-id"?: boolean; } -export interface JobListFromJobScheduleQueryParamProperties { +export interface ListJobsFromScheduleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1529,25 +1623,24 @@ export interface JobListFromJobScheduleQueryParamProperties { */ $filter?: string; /** An OData $select clause. */ - $select?: string; + $select?: string[]; /** An OData $expand clause. */ - $expand?: string; + $expand?: string[]; } -export interface JobListFromJobScheduleQueryParam { - queryParameters?: JobListFromJobScheduleQueryParamProperties; +export interface ListJobsFromScheduleQueryParam { + queryParameters?: ListJobsFromScheduleQueryParamProperties; } -export interface JobListFromJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobListFromJobScheduleHeaders; +export interface ListJobsFromScheduleHeaderParam { + headers?: RawHttpHeadersInput & ListJobsFromScheduleHeaders; } -export type JobListFromJobScheduleParameters = - JobListFromJobScheduleQueryParam & - JobListFromJobScheduleHeaderParam & - RequestParameters; +export type ListJobsFromScheduleParameters = ListJobsFromScheduleQueryParam & + ListJobsFromScheduleHeaderParam & + RequestParameters; -export interface JobListPreparationAndReleaseTaskStatusHeaders { +export interface ListJobPreparationAndReleaseTaskStatusHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -1563,7 +1656,7 @@ export interface JobListPreparationAndReleaseTaskStatusHeaders { "return-client-request-id"?: boolean; } -export interface JobListPreparationAndReleaseTaskStatusQueryParamProperties { +export interface ListJobPreparationAndReleaseTaskStatusQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1580,23 +1673,23 @@ export interface JobListPreparationAndReleaseTaskStatusQueryParamProperties { */ $filter?: string; /** An OData $select clause. 
*/ - $select?: string; + $select?: string[]; } -export interface JobListPreparationAndReleaseTaskStatusQueryParam { - queryParameters?: JobListPreparationAndReleaseTaskStatusQueryParamProperties; +export interface ListJobPreparationAndReleaseTaskStatusQueryParam { + queryParameters?: ListJobPreparationAndReleaseTaskStatusQueryParamProperties; } -export interface JobListPreparationAndReleaseTaskStatusHeaderParam { - headers?: RawHttpHeadersInput & JobListPreparationAndReleaseTaskStatusHeaders; +export interface ListJobPreparationAndReleaseTaskStatusHeaderParam { + headers?: RawHttpHeadersInput & ListJobPreparationAndReleaseTaskStatusHeaders; } -export type JobListPreparationAndReleaseTaskStatusParameters = - JobListPreparationAndReleaseTaskStatusQueryParam & - JobListPreparationAndReleaseTaskStatusHeaderParam & +export type ListJobPreparationAndReleaseTaskStatusParameters = + ListJobPreparationAndReleaseTaskStatusQueryParam & + ListJobPreparationAndReleaseTaskStatusHeaderParam & RequestParameters; -export interface JobGetTaskCountsHeaders { +export interface GetJobTaskCountsHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1612,7 +1705,7 @@ export interface JobGetTaskCountsHeaders { "ocp-date"?: string; } -export interface JobGetTaskCountsQueryParamProperties { +export interface GetJobTaskCountsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1620,19 +1713,19 @@ export interface JobGetTaskCountsQueryParamProperties { timeOut?: number; } -export interface JobGetTaskCountsQueryParam { - queryParameters?: JobGetTaskCountsQueryParamProperties; +export interface GetJobTaskCountsQueryParam { + queryParameters?: GetJobTaskCountsQueryParamProperties; } -export interface JobGetTaskCountsHeaderParam { - headers?: RawHttpHeadersInput & JobGetTaskCountsHeaders; +export interface GetJobTaskCountsHeaderParam { + headers?: RawHttpHeadersInput & GetJobTaskCountsHeaders; } -export type JobGetTaskCountsParameters = JobGetTaskCountsQueryParam & - JobGetTaskCountsHeaderParam & +export type GetJobTaskCountsParameters = GetJobTaskCountsQueryParam & + GetJobTaskCountsHeaderParam & RequestParameters; -export interface CertificatesAddCertificateHeaders { +export interface CreateCertificateHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1648,12 +1741,12 @@ export interface CertificatesAddCertificateHeaders { "ocp-date"?: string; } -export interface CertificatesAddCertificateBodyParam { - /** The Certificate to be added. */ - body: Certificate; +export interface CreateCertificateBodyParam { + /** The Certificate to be created. */ + body: BatchCertificate; } -export interface CertificatesAddCertificateQueryParamProperties { +export interface CreateCertificateQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
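Aside: note the $select/$expand switch from comma-separated strings to string arrays across the list operations. A sketch of ListJobsParameters under the new shape (the filter expression and property names are illustrative):

import type { ListJobsParameters } from "./parameters"; // hypothetical path

const listJobsOptions: ListJobsParameters = {
  queryParameters: {
    maxresults: 100,
    $filter: "state eq 'active'",
    // Previously a single "id,state" string; each OData clause entry is now its own element.
    $select: ["id", "state"],
    $expand: ["stats"],
  },
};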
@@ -1661,21 +1754,26 @@ export interface CertificatesAddCertificateQueryParamProperties { timeOut?: number; } -export interface CertificatesAddCertificateQueryParam { - queryParameters?: CertificatesAddCertificateQueryParamProperties; +export interface CreateCertificateQueryParam { + queryParameters?: CreateCertificateQueryParamProperties; } -export interface CertificatesAddCertificateHeaderParam { - headers?: RawHttpHeadersInput & CertificatesAddCertificateHeaders; +export interface CreateCertificateHeaderParam { + headers?: RawHttpHeadersInput & CreateCertificateHeaders; } -export type CertificatesAddCertificateParameters = - CertificatesAddCertificateQueryParam & - CertificatesAddCertificateHeaderParam & - CertificatesAddCertificateBodyParam & - RequestParameters; +export interface CreateCertificateMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type CreateCertificateParameters = CreateCertificateQueryParam & + CreateCertificateHeaderParam & + CreateCertificateMediaTypesParam & + CreateCertificateBodyParam & + RequestParameters; -export interface CertificatesListCertificatesHeaders { +export interface ListCertificatesHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -1691,7 +1789,7 @@ export interface CertificatesListCertificatesHeaders { "return-client-request-id"?: boolean; } -export interface CertificatesListCertificatesQueryParamProperties { +export interface ListCertificatesQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1708,23 +1806,22 @@ export interface CertificatesListCertificatesQueryParamProperties { */ $filter?: string; /** An OData $select clause. */ - $select?: string; + $select?: string[]; } -export interface CertificatesListCertificatesQueryParam { - queryParameters?: CertificatesListCertificatesQueryParamProperties; +export interface ListCertificatesQueryParam { + queryParameters?: ListCertificatesQueryParamProperties; } -export interface CertificatesListCertificatesHeaderParam { - headers?: RawHttpHeadersInput & CertificatesListCertificatesHeaders; +export interface ListCertificatesHeaderParam { + headers?: RawHttpHeadersInput & ListCertificatesHeaders; } -export type CertificatesListCertificatesParameters = - CertificatesListCertificatesQueryParam & - CertificatesListCertificatesHeaderParam & - RequestParameters; +export type ListCertificatesParameters = ListCertificatesQueryParam & + ListCertificatesHeaderParam & + RequestParameters; -export interface CertificatesCancelCertificateDeletionHeaders { +export interface CancelCertificateDeletionHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1740,7 +1837,7 @@ export interface CertificatesCancelCertificateDeletionHeaders { "ocp-date"?: string; } -export interface CertificatesCancelCertificateDeletionQueryParamProperties { +export interface CancelCertificateDeletionQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
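Aside: CreateCertificate follows the same pattern as the Job operations, requiring contentType through its new MediaTypesParam. A sketch, assuming BatchCertificate carries the usual Batch certificate fields (thumbprint, thumbprintAlgorithm, base64 data, optional format and password); all values below are placeholders:

import type { CreateCertificateParameters } from "./parameters"; // hypothetical path

const createCertificateOptions: CreateCertificateParameters = {
  contentType: "application/json; odata=minimalmetadata",
  body: {
    thumbprint: "0123456789abcdef0123456789abcdef01234567",
    thumbprintAlgorithm: "sha1",
    data: "<base64-encoded certificate>",
    certificateFormat: "pfx",
    password: "<pfx password>",
  },
};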
@@ -1748,20 +1845,20 @@ export interface CertificatesCancelCertificateDeletionQueryParamProperties { timeOut?: number; } -export interface CertificatesCancelCertificateDeletionQueryParam { - queryParameters?: CertificatesCancelCertificateDeletionQueryParamProperties; +export interface CancelCertificateDeletionQueryParam { + queryParameters?: CancelCertificateDeletionQueryParamProperties; } -export interface CertificatesCancelCertificateDeletionHeaderParam { - headers?: RawHttpHeadersInput & CertificatesCancelCertificateDeletionHeaders; +export interface CancelCertificateDeletionHeaderParam { + headers?: RawHttpHeadersInput & CancelCertificateDeletionHeaders; } -export type CertificatesCancelCertificateDeletionParameters = - CertificatesCancelCertificateDeletionQueryParam & - CertificatesCancelCertificateDeletionHeaderParam & +export type CancelCertificateDeletionParameters = + CancelCertificateDeletionQueryParam & + CancelCertificateDeletionHeaderParam & RequestParameters; -export interface CertificatesDeleteCertificateHeaders { +export interface DeleteCertificateHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1777,7 +1874,7 @@ export interface CertificatesDeleteCertificateHeaders { "ocp-date"?: string; } -export interface CertificatesDeleteCertificateQueryParamProperties { +export interface DeleteCertificateQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1785,20 +1882,19 @@ export interface CertificatesDeleteCertificateQueryParamProperties { timeOut?: number; } -export interface CertificatesDeleteCertificateQueryParam { - queryParameters?: CertificatesDeleteCertificateQueryParamProperties; +export interface DeleteCertificateQueryParam { + queryParameters?: DeleteCertificateQueryParamProperties; } -export interface CertificatesDeleteCertificateHeaderParam { - headers?: RawHttpHeadersInput & CertificatesDeleteCertificateHeaders; +export interface DeleteCertificateHeaderParam { + headers?: RawHttpHeadersInput & DeleteCertificateHeaders; } -export type CertificatesDeleteCertificateParameters = - CertificatesDeleteCertificateQueryParam & - CertificatesDeleteCertificateHeaderParam & - RequestParameters; +export type DeleteCertificateParameters = DeleteCertificateQueryParam & + DeleteCertificateHeaderParam & + RequestParameters; -export interface CertificatesGetCertificateHeaders { +export interface GetCertificateHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1814,30 +1910,29 @@ export interface CertificatesGetCertificateHeaders { "ocp-date"?: string; } -export interface CertificatesGetCertificateQueryParamProperties { +export interface GetCertificateQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; /** An OData $select clause. 
*/ - $select?: string; + $select?: string[]; } -export interface CertificatesGetCertificateQueryParam { - queryParameters?: CertificatesGetCertificateQueryParamProperties; +export interface GetCertificateQueryParam { + queryParameters?: GetCertificateQueryParamProperties; } -export interface CertificatesGetCertificateHeaderParam { - headers?: RawHttpHeadersInput & CertificatesGetCertificateHeaders; +export interface GetCertificateHeaderParam { + headers?: RawHttpHeadersInput & GetCertificateHeaders; } -export type CertificatesGetCertificateParameters = - CertificatesGetCertificateQueryParam & - CertificatesGetCertificateHeaderParam & - RequestParameters; +export type GetCertificateParameters = GetCertificateQueryParam & + GetCertificateHeaderParam & + RequestParameters; -export interface FileDeleteFromTaskHeaders { +export interface JobScheduleExistsHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1851,36 +1946,53 @@ export interface FileDeleteFromTaskHeaders { * directly. */ "ocp-date"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + "if-match"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + "if-none-match"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + "if-modified-since"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + "if-unmodified-since"?: string; } -export interface FileDeleteFromTaskQueryParamProperties { +export interface JobScheduleExistsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; - /** - * Whether to delete children of a directory. If the filePath parameter represents - * a directory instead of a file, you can set recursive to true to delete the - * directory and all of the files and subdirectories in it. If recursive is false - * then the directory must be empty or deletion will fail. 
- */ - recursive?: boolean; } -export interface FileDeleteFromTaskQueryParam { - queryParameters?: FileDeleteFromTaskQueryParamProperties; +export interface JobScheduleExistsQueryParam { + queryParameters?: JobScheduleExistsQueryParamProperties; } -export interface FileDeleteFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileDeleteFromTaskHeaders; +export interface JobScheduleExistsHeaderParam { + headers?: RawHttpHeadersInput & JobScheduleExistsHeaders; } -export type FileDeleteFromTaskParameters = FileDeleteFromTaskQueryParam & - FileDeleteFromTaskHeaderParam & +export type JobScheduleExistsParameters = JobScheduleExistsQueryParam & + JobScheduleExistsHeaderParam & RequestParameters; -export interface FileGetFromTaskHeaders { +export interface DeleteJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1894,6 +2006,18 @@ export interface FileGetFromTaskHeaders { * directly. */ "ocp-date"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + "if-match"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + "if-none-match"?: string; /** * A timestamp indicating the last modified time of the resource known to the * client. The operation will be performed only if the resource on the service has @@ -1906,14 +2030,9 @@ export interface FileGetFromTaskHeaders { * not been modified since the specified time. */ "if-unmodified-since"?: string; - /** - * The byte range to be retrieved. The default is to retrieve the entire file. The - * format is bytes=startRange-endRange. - */ - "ocp-range"?: string; } -export interface FileGetFromTaskQueryParamProperties { +export interface DeleteJobScheduleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -1921,19 +2040,19 @@ export interface FileGetFromTaskQueryParamProperties { timeOut?: number; } -export interface FileGetFromTaskQueryParam { - queryParameters?: FileGetFromTaskQueryParamProperties; +export interface DeleteJobScheduleQueryParam { + queryParameters?: DeleteJobScheduleQueryParamProperties; } -export interface FileGetFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileGetFromTaskHeaders; +export interface DeleteJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & DeleteJobScheduleHeaders; } -export type FileGetFromTaskParameters = FileGetFromTaskQueryParam & - FileGetFromTaskHeaderParam & +export type DeleteJobScheduleParameters = DeleteJobScheduleQueryParam & + DeleteJobScheduleHeaderParam & RequestParameters; -export interface FileGetPropertiesFromTaskHeaders { +export interface GetJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1947,6 +2066,18 @@ export interface FileGetPropertiesFromTaskHeaders { * directly. */ "ocp-date"?: string; + /** + * An ETag value associated with the version of the resource known to the client. 
+ * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + "if-match"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + "if-none-match"?: string; /** * A timestamp indicating the last modified time of the resource known to the * client. The operation will be performed only if the resource on the service has @@ -1961,28 +2092,31 @@ export interface FileGetPropertiesFromTaskHeaders { "if-unmodified-since"?: string; } -export interface FileGetPropertiesFromTaskQueryParamProperties { +export interface GetJobScheduleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; } -export interface FileGetPropertiesFromTaskQueryParam { - queryParameters?: FileGetPropertiesFromTaskQueryParamProperties; +export interface GetJobScheduleQueryParam { + queryParameters?: GetJobScheduleQueryParamProperties; } -export interface FileGetPropertiesFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileGetPropertiesFromTaskHeaders; +export interface GetJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & GetJobScheduleHeaders; } -export type FileGetPropertiesFromTaskParameters = - FileGetPropertiesFromTaskQueryParam & - FileGetPropertiesFromTaskHeaderParam & - RequestParameters; +export type GetJobScheduleParameters = GetJobScheduleQueryParam & + GetJobScheduleHeaderParam & + RequestParameters; -export interface FileDeleteFromComputeNodeHeaders { +export interface UpdateJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -1996,37 +2130,65 @@ export interface FileDeleteFromComputeNodeHeaders { * directly. */ "ocp-date"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + "if-match"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + "if-none-match"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + "if-modified-since"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + "if-unmodified-since"?: string; +} + +export interface UpdateJobScheduleBodyParam { + /** The options to use for updating the Job Schedule. */ + body: BatchJobScheduleUpdateOptions; } -export interface FileDeleteFromComputeNodeQueryParamProperties { +export interface UpdateJobScheduleQueryParamProperties { /** * The maximum number of items to return in the response. 
A maximum of 1000 * applications can be returned. */ timeOut?: number; - /** - * Whether to delete children of a directory. If the filePath parameter represents - * a directory instead of a file, you can set recursive to true to delete the - * directory and all of the files and subdirectories in it. If recursive is false - * then the directory must be empty or deletion will fail. - */ - recursive?: boolean; } -export interface FileDeleteFromComputeNodeQueryParam { - queryParameters?: FileDeleteFromComputeNodeQueryParamProperties; +export interface UpdateJobScheduleQueryParam { + queryParameters?: UpdateJobScheduleQueryParamProperties; } -export interface FileDeleteFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileDeleteFromComputeNodeHeaders; +export interface UpdateJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & UpdateJobScheduleHeaders; } -export type FileDeleteFromComputeNodeParameters = - FileDeleteFromComputeNodeQueryParam & - FileDeleteFromComputeNodeHeaderParam & - RequestParameters; +export interface UpdateJobScheduleMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} -export interface FileGetFromComputeNodeHeaders { +export type UpdateJobScheduleParameters = UpdateJobScheduleQueryParam & + UpdateJobScheduleHeaderParam & + UpdateJobScheduleMediaTypesParam & + UpdateJobScheduleBodyParam & + RequestParameters; + +export interface ReplaceJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2040,6 +2202,18 @@ export interface FileGetFromComputeNodeHeaders { * directly. */ "ocp-date"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + "if-match"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + "if-none-match"?: string; /** * A timestamp indicating the last modified time of the resource known to the * client. The operation will be performed only if the resource on the service has @@ -2052,14 +2226,14 @@ export interface FileGetFromComputeNodeHeaders { * not been modified since the specified time. */ "if-unmodified-since"?: string; - /** - * The byte range to be retrieved. The default is to retrieve the entire file. The - * format is bytes=startRange-endRange. - */ - "ocp-range"?: string; } -export interface FileGetFromComputeNodeQueryParamProperties { +export interface ReplaceJobScheduleBodyParam { + /** A Job Schedule with updated properties */ + body: BatchJobSchedule; +} + +export interface ReplaceJobScheduleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
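Aside: UpdateJobSchedule keeps the full set of ETag/timestamp precondition headers, and its body is now the narrower BatchJobScheduleUpdateOptions rather than a full BatchJobSchedule. A sketch of a conditional patch (the schedule shape and ETag value are illustrative; recurrenceInterval is an ISO 8601 duration in the Batch REST API):

import type { UpdateJobScheduleParameters } from "./parameters"; // hypothetical path

const updateJobScheduleOptions: UpdateJobScheduleParameters = {
  contentType: "application/json; odata=minimalmetadata",
  // Only apply the patch if the schedule has not changed since it was read.
  headers: { "if-match": '"0x8DB1A2B3C4D5E6F"' },
  body: { schedule: { recurrenceInterval: "PT5M" } },
};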
@@ -2067,20 +2241,26 @@ export interface FileGetFromComputeNodeQueryParamProperties { timeOut?: number; } -export interface FileGetFromComputeNodeQueryParam { - queryParameters?: FileGetFromComputeNodeQueryParamProperties; +export interface ReplaceJobScheduleQueryParam { + queryParameters?: ReplaceJobScheduleQueryParamProperties; } -export interface FileGetFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileGetFromComputeNodeHeaders; +export interface ReplaceJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & ReplaceJobScheduleHeaders; } -export type FileGetFromComputeNodeParameters = - FileGetFromComputeNodeQueryParam & - FileGetFromComputeNodeHeaderParam & - RequestParameters; +export interface ReplaceJobScheduleMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} -export interface FileGetPropertiesFromComputeNodeHeaders { +export type ReplaceJobScheduleParameters = ReplaceJobScheduleQueryParam & + ReplaceJobScheduleHeaderParam & + ReplaceJobScheduleMediaTypesParam & + ReplaceJobScheduleBodyParam & + RequestParameters; + +export interface DisableJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2094,6 +2274,18 @@ export interface FileGetPropertiesFromComputeNodeHeaders { * directly. */ "ocp-date"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + "if-match"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + "if-none-match"?: string; /** * A timestamp indicating the last modified time of the resource known to the * client. The operation will be performed only if the resource on the service has @@ -2108,7 +2300,7 @@ export interface FileGetPropertiesFromComputeNodeHeaders { "if-unmodified-since"?: string; } -export interface FileGetPropertiesFromComputeNodeQueryParamProperties { +export interface DisableJobScheduleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -2116,120 +2308,19 @@ export interface FileGetPropertiesFromComputeNodeQueryParamProperties { timeOut?: number; } -export interface FileGetPropertiesFromComputeNodeQueryParam { - queryParameters?: FileGetPropertiesFromComputeNodeQueryParamProperties; +export interface DisableJobScheduleQueryParam { + queryParameters?: DisableJobScheduleQueryParamProperties; } -export interface FileGetPropertiesFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileGetPropertiesFromComputeNodeHeaders; +export interface DisableJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & DisableJobScheduleHeaders; } -export type FileGetPropertiesFromComputeNodeParameters = - FileGetPropertiesFromComputeNodeQueryParam & - FileGetPropertiesFromComputeNodeHeaderParam & - RequestParameters; +export type DisableJobScheduleParameters = DisableJobScheduleQueryParam & + DisableJobScheduleHeaderParam & + RequestParameters; -export interface FileListFromTaskHeaders { - /** - * The time the request was issued. 
Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface FileListFromTaskQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. - */ - $filter?: string; - /** - * Whether to list children of the Task directory. This parameter can be used in - * combination with the filter parameter to list specific type of files. - */ - recursive?: boolean; -} - -export interface FileListFromTaskQueryParam { - queryParameters?: FileListFromTaskQueryParamProperties; -} - -export interface FileListFromTaskHeaderParam { - headers?: RawHttpHeadersInput & FileListFromTaskHeaders; -} - -export type FileListFromTaskParameters = FileListFromTaskQueryParam & - FileListFromTaskHeaderParam & - RequestParameters; - -export interface FileListFromComputeNodeHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface FileListFromComputeNodeQueryParamProperties { - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - maxresults?: number; - /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. - */ - timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. - */ - $filter?: string; - /** Whether to list children of a directory. */ - recursive?: boolean; -} - -export interface FileListFromComputeNodeQueryParam { - queryParameters?: FileListFromComputeNodeQueryParamProperties; -} - -export interface FileListFromComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & FileListFromComputeNodeHeaders; -} - -export type FileListFromComputeNodeParameters = - FileListFromComputeNodeQueryParam & - FileListFromComputeNodeHeaderParam & - RequestParameters; - -export interface JobScheduleJobScheduleExistsHeaders { +export interface EnableJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
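Aside: the job-schedule lifecycle operations (disable/enable/terminate) remain header-and-query only; no body or contentType is involved. A sketch of a conditional DisableJobScheduleParameters using only fields declared above:

import type { DisableJobScheduleParameters } from "./parameters"; // hypothetical path

const disableJobScheduleOptions: DisableJobScheduleParameters = {
  queryParameters: { timeOut: 30 },
  // The service rejects the call if the schedule was modified after this timestamp.
  headers: { "if-unmodified-since": new Date().toUTCString() },
};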
@@ -2269,7 +2360,7 @@ export interface JobScheduleJobScheduleExistsHeaders { "if-unmodified-since"?: string; } -export interface JobScheduleJobScheduleExistsQueryParamProperties { +export interface EnableJobScheduleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -2277,20 +2368,19 @@ export interface JobScheduleJobScheduleExistsQueryParamProperties { timeOut?: number; } -export interface JobScheduleJobScheduleExistsQueryParam { - queryParameters?: JobScheduleJobScheduleExistsQueryParamProperties; +export interface EnableJobScheduleQueryParam { + queryParameters?: EnableJobScheduleQueryParamProperties; } -export interface JobScheduleJobScheduleExistsHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleJobScheduleExistsHeaders; +export interface EnableJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & EnableJobScheduleHeaders; } -export type JobScheduleJobScheduleExistsParameters = - JobScheduleJobScheduleExistsQueryParam & - JobScheduleJobScheduleExistsHeaderParam & - RequestParameters; +export type EnableJobScheduleParameters = EnableJobScheduleQueryParam & + EnableJobScheduleHeaderParam & + RequestParameters; -export interface JobScheduleDeleteJobScheduleHeaders { +export interface TerminateJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2330,7 +2420,7 @@ export interface JobScheduleDeleteJobScheduleHeaders { "if-unmodified-since"?: string; } -export interface JobScheduleDeleteJobScheduleQueryParamProperties { +export interface TerminateJobScheduleQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -2338,20 +2428,19 @@ export interface JobScheduleDeleteJobScheduleQueryParamProperties { timeOut?: number; } -export interface JobScheduleDeleteJobScheduleQueryParam { - queryParameters?: JobScheduleDeleteJobScheduleQueryParamProperties; +export interface TerminateJobScheduleQueryParam { + queryParameters?: TerminateJobScheduleQueryParamProperties; } -export interface JobScheduleDeleteJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleDeleteJobScheduleHeaders; +export interface TerminateJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & TerminateJobScheduleHeaders; } -export type JobScheduleDeleteJobScheduleParameters = - JobScheduleDeleteJobScheduleQueryParam & - JobScheduleDeleteJobScheduleHeaderParam & - RequestParameters; +export type TerminateJobScheduleParameters = TerminateJobScheduleQueryParam & + TerminateJobScheduleHeaderParam & + RequestParameters; -export interface JobScheduleGetJobScheduleHeaders { +export interface CreateJobScheduleHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2365,58 +2454,91 @@ export interface JobScheduleGetJobScheduleHeaders { * directly. */ "ocp-date"?: string; +} + +export interface CreateJobScheduleBodyParam { + /** The Job Schedule to be created. */ + body: BatchJobScheduleCreateOptions; +} + +export interface CreateJobScheduleQueryParamProperties { /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. 
- */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. */ - "if-none-match"?: string; + timeOut?: number; +} + +export interface CreateJobScheduleQueryParam { + queryParameters?: CreateJobScheduleQueryParamProperties; +} + +export interface CreateJobScheduleHeaderParam { + headers?: RawHttpHeadersInput & CreateJobScheduleHeaders; +} + +export interface CreateJobScheduleMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type CreateJobScheduleParameters = CreateJobScheduleQueryParam & + CreateJobScheduleHeaderParam & + CreateJobScheduleMediaTypesParam & + CreateJobScheduleBodyParam & + RequestParameters; + +export interface ListJobSchedulesHeaders { /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. */ - "if-modified-since"?: string; + "ocp-date"?: string; /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. */ - "if-unmodified-since"?: string; + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; } -export interface JobScheduleGetJobScheduleQueryParamProperties { +export interface ListJobSchedulesQueryParamProperties { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + */ + $filter?: string; /** An OData $select clause. */ - $select?: string; + $select?: string[]; /** An OData $expand clause. 
*/ - $expand?: string; + $expand?: string[]; } -export interface JobScheduleGetJobScheduleQueryParam { - queryParameters?: JobScheduleGetJobScheduleQueryParamProperties; +export interface ListJobSchedulesQueryParam { + queryParameters?: ListJobSchedulesQueryParamProperties; } -export interface JobScheduleGetJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleGetJobScheduleHeaders; +export interface ListJobSchedulesHeaderParam { + headers?: RawHttpHeadersInput & ListJobSchedulesHeaders; } -export type JobScheduleGetJobScheduleParameters = - JobScheduleGetJobScheduleQueryParam & - JobScheduleGetJobScheduleHeaderParam & - RequestParameters; +export type ListJobSchedulesParameters = ListJobSchedulesQueryParam & + ListJobSchedulesHeaderParam & + RequestParameters; -export interface JobSchedulePatchJobScheduleHeaders { +export interface CreateTaskHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2430,38 +2552,14 @@ export interface JobSchedulePatchJobScheduleHeaders { * directly. */ "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; } -export interface JobSchedulePatchJobScheduleBodyParam { - /** The parameters for the request. */ - body: BatchJobSchedule; +export interface CreateTaskBodyParam { + /** The Task to be created. */ + body: BatchTaskCreateOptions; } -export interface JobSchedulePatchJobScheduleQueryParamProperties { +export interface CreateTaskQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
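Aside: CreateJobSchedule follows the same create pattern, pairing a create-specific body model with the required odata=minimalmetadata content type. A sketch, assuming BatchJobScheduleCreateOptions mirrors the Batch "add job schedule" body (id, schedule, and jobSpecification); ids and pool names are placeholders:

import type { CreateJobScheduleParameters } from "./parameters"; // hypothetical path

const createJobScheduleOptions: CreateJobScheduleParameters = {
  contentType: "application/json; odata=minimalmetadata",
  body: {
    id: "nightly-reporting",
    schedule: { recurrenceInterval: "P1D" }, // run once a day
    jobSpecification: { poolInfo: { poolId: "reporting-pool" } },
  },
};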
@@ -2469,21 +2567,32 @@ export interface JobSchedulePatchJobScheduleQueryParamProperties { timeOut?: number; } -export interface JobSchedulePatchJobScheduleQueryParam { - queryParameters?: JobSchedulePatchJobScheduleQueryParamProperties; +export interface CreateTaskQueryParam { + queryParameters?: CreateTaskQueryParamProperties; } -export interface JobSchedulePatchJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobSchedulePatchJobScheduleHeaders; +export interface CreateTaskHeaderParam { + headers?: RawHttpHeadersInput & CreateTaskHeaders; } -export type JobSchedulePatchJobScheduleParameters = - JobSchedulePatchJobScheduleQueryParam & - JobSchedulePatchJobScheduleHeaderParam & - JobSchedulePatchJobScheduleBodyParam & - RequestParameters; +export interface CreateTaskMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type CreateTaskParameters = CreateTaskQueryParam & + CreateTaskHeaderParam & + CreateTaskMediaTypesParam & + CreateTaskBodyParam & + RequestParameters; -export interface JobScheduleUpdateJobScheduleHeaders { +export interface ListTasksHeaders { + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2491,44 +2600,64 @@ export interface JobScheduleUpdateJobScheduleHeaders { "client-request-id"?: string; /** Whether the server should return the client-request-id in the response. */ "return-client-request-id"?: boolean; +} + +export interface ListTasksQueryParamProperties { /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. */ - "ocp-date"?: string; + maxresults?: number; /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. */ - "if-match"?: string; + timeOut?: number; /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. */ - "if-none-match"?: string; + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; +} + +export interface ListTasksQueryParam { + queryParameters?: ListTasksQueryParamProperties; +} + +export interface ListTasksHeaderParam { + headers?: RawHttpHeadersInput & ListTasksHeaders; +} + +export type ListTasksParameters = ListTasksQueryParam & + ListTasksHeaderParam & + RequestParameters; + +export interface CreateTaskCollectionHeaders { /** - * A timestamp indicating the last modified time of the resource known to the - * client. 
The operation will be performed only if the resource on the service has - * been modified since the specified time. + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. */ - "if-modified-since"?: string; + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. */ - "if-unmodified-since"?: string; + "ocp-date"?: string; } -export interface JobScheduleUpdateJobScheduleBodyParam { - /** The parameters for the request. */ - body: BatchJobSchedule; +export interface CreateTaskCollectionBodyParam { + /** The Tasks to be added. */ + body: BatchTaskCollection; } -export interface JobScheduleUpdateJobScheduleQueryParamProperties { +export interface CreateTaskCollectionQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -2536,21 +2665,26 @@ export interface JobScheduleUpdateJobScheduleQueryParamProperties { timeOut?: number; } -export interface JobScheduleUpdateJobScheduleQueryParam { - queryParameters?: JobScheduleUpdateJobScheduleQueryParamProperties; +export interface CreateTaskCollectionQueryParam { + queryParameters?: CreateTaskCollectionQueryParamProperties; } -export interface JobScheduleUpdateJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleUpdateJobScheduleHeaders; +export interface CreateTaskCollectionHeaderParam { + headers?: RawHttpHeadersInput & CreateTaskCollectionHeaders; } -export type JobScheduleUpdateJobScheduleParameters = - JobScheduleUpdateJobScheduleQueryParam & - JobScheduleUpdateJobScheduleHeaderParam & - JobScheduleUpdateJobScheduleBodyParam & - RequestParameters; +export interface CreateTaskCollectionMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type CreateTaskCollectionParameters = CreateTaskCollectionQueryParam & + CreateTaskCollectionHeaderParam & + CreateTaskCollectionMediaTypesParam & + CreateTaskCollectionBodyParam & + RequestParameters; -export interface JobScheduleDisableJobScheduleHeaders { +export interface DeleteTaskHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2590,7 +2724,7 @@ export interface JobScheduleDisableJobScheduleHeaders { "if-unmodified-since"?: string; } -export interface JobScheduleDisableJobScheduleQueryParamProperties { +export interface DeleteTaskQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
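Aside: CreateTaskCollection (formerly TaskAddTaskCollection) keeps BatchTaskCollection as its body, which wraps the individual task definitions in a value array. A sketch, assuming each entry matches BatchTaskCreateOptions (task ids and command lines are illustrative):

import type { CreateTaskCollectionParameters } from "./parameters"; // hypothetical path

const createTaskCollectionOptions: CreateTaskCollectionParameters = {
  contentType: "application/json; odata=minimalmetadata",
  body: {
    value: [
      { id: "task-1", commandLine: "/bin/bash -c 'echo one'" },
      { id: "task-2", commandLine: "/bin/bash -c 'echo two'" },
    ],
  },
};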
@@ -2598,20 +2732,19 @@ export interface JobScheduleDisableJobScheduleQueryParamProperties { timeOut?: number; } -export interface JobScheduleDisableJobScheduleQueryParam { - queryParameters?: JobScheduleDisableJobScheduleQueryParamProperties; +export interface DeleteTaskQueryParam { + queryParameters?: DeleteTaskQueryParamProperties; } -export interface JobScheduleDisableJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleDisableJobScheduleHeaders; +export interface DeleteTaskHeaderParam { + headers?: RawHttpHeadersInput & DeleteTaskHeaders; } -export type JobScheduleDisableJobScheduleParameters = - JobScheduleDisableJobScheduleQueryParam & - JobScheduleDisableJobScheduleHeaderParam & - RequestParameters; +export type DeleteTaskParameters = DeleteTaskQueryParam & + DeleteTaskHeaderParam & + RequestParameters; -export interface JobScheduleEnableJobScheduleHeaders { +export interface GetTaskHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2651,28 +2784,31 @@ export interface JobScheduleEnableJobScheduleHeaders { "if-unmodified-since"?: string; } -export interface JobScheduleEnableJobScheduleQueryParamProperties { +export interface GetTaskQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** An OData $select clause. */ + $select?: string[]; + /** An OData $expand clause. */ + $expand?: string[]; } -export interface JobScheduleEnableJobScheduleQueryParam { - queryParameters?: JobScheduleEnableJobScheduleQueryParamProperties; +export interface GetTaskQueryParam { + queryParameters?: GetTaskQueryParamProperties; } -export interface JobScheduleEnableJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleEnableJobScheduleHeaders; +export interface GetTaskHeaderParam { + headers?: RawHttpHeadersInput & GetTaskHeaders; } -export type JobScheduleEnableJobScheduleParameters = - JobScheduleEnableJobScheduleQueryParam & - JobScheduleEnableJobScheduleHeaderParam & - RequestParameters; +export type GetTaskParameters = GetTaskQueryParam & + GetTaskHeaderParam & + RequestParameters; -export interface JobScheduleTerminateJobScheduleHeaders { +export interface ReplaceTaskHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2712,7 +2848,12 @@ export interface JobScheduleTerminateJobScheduleHeaders { "if-unmodified-since"?: string; } -export interface JobScheduleTerminateJobScheduleQueryParamProperties { +export interface ReplaceTaskBodyParam { + /** The Task to update. */ + body: BatchTask; +} + +export interface ReplaceTaskQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
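Aside: GetTask gains the same array-valued OData projection parameters as the list operations. A sketch (property names in the clauses are illustrative):

import type { GetTaskParameters } from "./parameters"; // hypothetical path

const getTaskOptions: GetTaskParameters = {
  queryParameters: {
    $select: ["id", "state", "executionInfo"],
    $expand: ["stats"],
  },
};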
@@ -2720,20 +2861,26 @@ export interface JobScheduleTerminateJobScheduleQueryParamProperties { timeOut?: number; } -export interface JobScheduleTerminateJobScheduleQueryParam { - queryParameters?: JobScheduleTerminateJobScheduleQueryParamProperties; +export interface ReplaceTaskQueryParam { + queryParameters?: ReplaceTaskQueryParamProperties; } -export interface JobScheduleTerminateJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleTerminateJobScheduleHeaders; +export interface ReplaceTaskHeaderParam { + headers?: RawHttpHeadersInput & ReplaceTaskHeaders; } -export type JobScheduleTerminateJobScheduleParameters = - JobScheduleTerminateJobScheduleQueryParam & - JobScheduleTerminateJobScheduleHeaderParam & - RequestParameters; +export interface ReplaceTaskMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type ReplaceTaskParameters = ReplaceTaskQueryParam & + ReplaceTaskHeaderParam & + ReplaceTaskMediaTypesParam & + ReplaceTaskBodyParam & + RequestParameters; -export interface JobScheduleAddJobScheduleHeaders { +export interface ListSubTasksHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2749,34 +2896,36 @@ export interface JobScheduleAddJobScheduleHeaders { "ocp-date"?: string; } -export interface JobScheduleAddJobScheduleBodyParam { - /** The Job Schedule to be added. */ - body: BatchJobSchedule; -} - -export interface JobScheduleAddJobScheduleQueryParamProperties { +export interface ListSubTasksQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** An OData $select clause. */ + $select?: string[]; } -export interface JobScheduleAddJobScheduleQueryParam { - queryParameters?: JobScheduleAddJobScheduleQueryParamProperties; +export interface ListSubTasksQueryParam { + queryParameters?: ListSubTasksQueryParamProperties; } -export interface JobScheduleAddJobScheduleHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleAddJobScheduleHeaders; +export interface ListSubTasksHeaderParam { + headers?: RawHttpHeadersInput & ListSubTasksHeaders; } -export type JobScheduleAddJobScheduleParameters = - JobScheduleAddJobScheduleQueryParam & - JobScheduleAddJobScheduleHeaderParam & - JobScheduleAddJobScheduleBodyParam & - RequestParameters; +export type ListSubTasksParameters = ListSubTasksQueryParam & + ListSubTasksHeaderParam & + RequestParameters; -export interface JobScheduleListJobSchedulesHeaders { +export interface TerminateTaskHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -2784,50 +2933,52 @@ export interface JobScheduleListJobSchedulesHeaders { */ "ocp-date"?: string; /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + * An ETag value associated with the version of the resource known to the client. 
+ * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface JobScheduleListJobSchedulesQueryParamProperties { + "if-match"?: string; /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. */ - maxresults?: number; + "if-none-match"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + "if-modified-since"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + "if-unmodified-since"?: string; +} + +export interface TerminateTaskQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. - */ - $filter?: string; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. */ - $expand?: string; } -export interface JobScheduleListJobSchedulesQueryParam { - queryParameters?: JobScheduleListJobSchedulesQueryParamProperties; +export interface TerminateTaskQueryParam { + queryParameters?: TerminateTaskQueryParamProperties; } -export interface JobScheduleListJobSchedulesHeaderParam { - headers?: RawHttpHeadersInput & JobScheduleListJobSchedulesHeaders; +export interface TerminateTaskHeaderParam { + headers?: RawHttpHeadersInput & TerminateTaskHeaders; } -export type JobScheduleListJobSchedulesParameters = - JobScheduleListJobSchedulesQueryParam & - JobScheduleListJobSchedulesHeaderParam & - RequestParameters; +export type TerminateTaskParameters = TerminateTaskQueryParam & + TerminateTaskHeaderParam & + RequestParameters; -export interface TaskAddTaskHeaders { +export interface ReactivateTaskHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2841,14 +2992,33 @@ export interface TaskAddTaskHeaders { * directly. */ "ocp-date"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service exactly matches the value specified by the client. + */ + "if-match"?: string; + /** + * An ETag value associated with the version of the resource known to the client. + * The operation will be performed only if the resource's current ETag on the + * service does not match the value specified by the client. + */ + "if-none-match"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. 
The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + "if-modified-since"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + "if-unmodified-since"?: string; } -export interface TaskAddTaskBodyParam { - /** The Task to be added. */ - body: BatchTask; -} - -export interface TaskAddTaskQueryParamProperties { +export interface ReactivateTaskQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -2856,26 +3026,19 @@ export interface TaskAddTaskQueryParamProperties { timeOut?: number; } -export interface TaskAddTaskQueryParam { - queryParameters?: TaskAddTaskQueryParamProperties; +export interface ReactivateTaskQueryParam { + queryParameters?: ReactivateTaskQueryParamProperties; } -export interface TaskAddTaskHeaderParam { - headers?: RawHttpHeadersInput & TaskAddTaskHeaders; +export interface ReactivateTaskHeaderParam { + headers?: RawHttpHeadersInput & ReactivateTaskHeaders; } -export type TaskAddTaskParameters = TaskAddTaskQueryParam & - TaskAddTaskHeaderParam & - TaskAddTaskBodyParam & +export type ReactivateTaskParameters = ReactivateTaskQueryParam & + ReactivateTaskHeaderParam & RequestParameters; -export interface TaskListTasksHeaders { - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; +export interface DeleteTaskFileHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2883,43 +3046,42 @@ export interface TaskListTasksHeaders { "client-request-id"?: string; /** Whether the server should return the client-request-id in the response. */ "return-client-request-id"?: boolean; -} - -export interface TaskListTasksQueryParamProperties { /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. */ - maxresults?: number; + "ocp-date"?: string; +} + +export interface DeleteTaskFileQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + * Whether to delete children of a directory. If the filePath parameter represents + * a directory instead of a file, you can set recursive to true to delete the + * directory and all of the files and subdirectories in it. If recursive is false + * then the directory must be empty or deletion will fail. */ - $filter?: string; - /** An OData $select clause. */ - $select?: string; - /** An OData $expand clause. 
*/ - $expand?: string; + recursive?: boolean; } -export interface TaskListTasksQueryParam { - queryParameters?: TaskListTasksQueryParamProperties; +export interface DeleteTaskFileQueryParam { + queryParameters?: DeleteTaskFileQueryParamProperties; } -export interface TaskListTasksHeaderParam { - headers?: RawHttpHeadersInput & TaskListTasksHeaders; +export interface DeleteTaskFileHeaderParam { + headers?: RawHttpHeadersInput & DeleteTaskFileHeaders; } -export type TaskListTasksParameters = TaskListTasksQueryParam & - TaskListTasksHeaderParam & +export type DeleteTaskFileParameters = DeleteTaskFileQueryParam & + DeleteTaskFileHeaderParam & RequestParameters; -export interface TaskAddTaskCollectionHeaders { +export interface GetTaskFileHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2933,14 +3095,26 @@ export interface TaskAddTaskCollectionHeaders { * directly. */ "ocp-date"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + "if-modified-since"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + "if-unmodified-since"?: string; + /** + * The byte range to be retrieved. The default is to retrieve the entire file. The + * format is bytes=startRange-endRange. + */ + "ocp-range"?: string; } -export interface TaskAddTaskCollectionBodyParam { - /** The Tasks to be added. */ - body: BatchTaskCollection; -} - -export interface TaskAddTaskCollectionQueryParamProperties { +export interface GetTaskFileQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -2948,20 +3122,19 @@ export interface TaskAddTaskCollectionQueryParamProperties { timeOut?: number; } -export interface TaskAddTaskCollectionQueryParam { - queryParameters?: TaskAddTaskCollectionQueryParamProperties; +export interface GetTaskFileQueryParam { + queryParameters?: GetTaskFileQueryParamProperties; } -export interface TaskAddTaskCollectionHeaderParam { - headers?: RawHttpHeadersInput & TaskAddTaskCollectionHeaders; +export interface GetTaskFileHeaderParam { + headers?: RawHttpHeadersInput & GetTaskFileHeaders; } -export type TaskAddTaskCollectionParameters = TaskAddTaskCollectionQueryParam & - TaskAddTaskCollectionHeaderParam & - TaskAddTaskCollectionBodyParam & +export type GetTaskFileParameters = GetTaskFileQueryParam & + GetTaskFileHeaderParam & RequestParameters; -export interface TaskDeleteTaskCollectionHeaders { +export interface GetTaskFilePropertiesHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -2975,18 +3148,6 @@ export interface TaskDeleteTaskCollectionHeaders { * directly. */ "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. 
- * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; /** * A timestamp indicating the last modified time of the resource known to the * client. The operation will be performed only if the resource on the service has @@ -3001,7 +3162,7 @@ export interface TaskDeleteTaskCollectionHeaders { "if-unmodified-since"?: string; } -export interface TaskDeleteTaskCollectionQueryParamProperties { +export interface GetTaskFilePropertiesQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -3009,84 +3170,70 @@ export interface TaskDeleteTaskCollectionQueryParamProperties { timeOut?: number; } -export interface TaskDeleteTaskCollectionQueryParam { - queryParameters?: TaskDeleteTaskCollectionQueryParamProperties; +export interface GetTaskFilePropertiesQueryParam { + queryParameters?: GetTaskFilePropertiesQueryParamProperties; } -export interface TaskDeleteTaskCollectionHeaderParam { - headers?: RawHttpHeadersInput & TaskDeleteTaskCollectionHeaders; +export interface GetTaskFilePropertiesHeaderParam { + headers?: RawHttpHeadersInput & GetTaskFilePropertiesHeaders; } -export type TaskDeleteTaskCollectionParameters = - TaskDeleteTaskCollectionQueryParam & - TaskDeleteTaskCollectionHeaderParam & - RequestParameters; +export type GetTaskFilePropertiesParameters = GetTaskFilePropertiesQueryParam & + GetTaskFilePropertiesHeaderParam & + RequestParameters; -export interface TaskGetTaskCollectionHeaders { - /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; +export interface ListTaskFilesHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API * directly. */ "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. */ - "if-unmodified-since"?: string; + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. 
*/
+  "return-client-request-id"?: boolean;
 }

-export interface TaskGetTaskCollectionQueryParamProperties {
+export interface ListTaskFilesQueryParamProperties {
+  /**
+   * The maximum number of items to return in the response. A maximum of 1000
+   * applications can be returned.
+   */
+  maxresults?: number;
   /**
    * The maximum number of items to return in the response. A maximum of 1000
    * applications can be returned.
    */
   timeOut?: number;
-  /** An OData $select clause. */
-  $select?: string;
-  /** An OData $expand clause. */
-  $expand?: string;
+  /**
+   * An OData $filter clause. For more information on constructing this filter, see
+   * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files.
+   */
+  $filter?: string;
+  /**
+   * Whether to list children of the Task directory. This parameter can be used in
+   * combination with the filter parameter to list specific types of files.
+   */
+  recursive?: boolean;
 }

-export interface TaskGetTaskCollectionQueryParam {
-  queryParameters?: TaskGetTaskCollectionQueryParamProperties;
+export interface ListTaskFilesQueryParam {
+  queryParameters?: ListTaskFilesQueryParamProperties;
 }

-export interface TaskGetTaskCollectionHeaderParam {
-  headers?: RawHttpHeadersInput & TaskGetTaskCollectionHeaders;
+export interface ListTaskFilesHeaderParam {
+  headers?: RawHttpHeadersInput & ListTaskFilesHeaders;
 }

-export type TaskGetTaskCollectionParameters = TaskGetTaskCollectionQueryParam &
-  TaskGetTaskCollectionHeaderParam &
+export type ListTaskFilesParameters = ListTaskFilesQueryParam &
+  ListTaskFilesHeaderParam &
   RequestParameters;

-export interface TaskUpdateTaskCollectionHeaders {
+export interface CreateNodeUserHeaders {
   /**
    * The caller-generated request identity, in the form of a GUID with no decoration
    * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
@@ -3100,38 +3247,14 @@ export interface TaskUpdateTaskCollectionHeaders {
    * directly.
    */
   "ocp-date"?: string;
-  /**
-   * An ETag value associated with the version of the resource known to the client.
-   * The operation will be performed only if the resource's current ETag on the
-   * service exactly matches the value specified by the client.
-   */
-  "if-match"?: string;
-  /**
-   * An ETag value associated with the version of the resource known to the client.
-   * The operation will be performed only if the resource's current ETag on the
-   * service does not match the value specified by the client.
-   */
-  "if-none-match"?: string;
-  /**
-   * A timestamp indicating the last modified time of the resource known to the
-   * client. The operation will be performed only if the resource on the service has
-   * been modified since the specified time.
-   */
-  "if-modified-since"?: string;
-  /**
-   * A timestamp indicating the last modified time of the resource known to the
-   * client. The operation will be performed only if the resource on the service has
-   * not been modified since the specified time.
-   */
-  "if-unmodified-since"?: string;
 }

-export interface TaskUpdateTaskCollectionBodyParam {
-  /** The parameters for the request. */
-  body: BatchTask;
+export interface CreateNodeUserBodyParam {
+  /** The options to use for creating the user. */
+  body: BatchNodeUserCreateOptions;
 }

-export interface TaskUpdateTaskCollectionQueryParamProperties {
+export interface CreateNodeUserQueryParamProperties {
   /**
    * The maximum number of items to return in the response. A maximum of 1000
    * applications can be returned. 
@@ -3139,21 +3262,26 @@ export interface TaskUpdateTaskCollectionQueryParamProperties { timeOut?: number; } -export interface TaskUpdateTaskCollectionQueryParam { - queryParameters?: TaskUpdateTaskCollectionQueryParamProperties; +export interface CreateNodeUserQueryParam { + queryParameters?: CreateNodeUserQueryParamProperties; } -export interface TaskUpdateTaskCollectionHeaderParam { - headers?: RawHttpHeadersInput & TaskUpdateTaskCollectionHeaders; +export interface CreateNodeUserHeaderParam { + headers?: RawHttpHeadersInput & CreateNodeUserHeaders; } -export type TaskUpdateTaskCollectionParameters = - TaskUpdateTaskCollectionQueryParam & - TaskUpdateTaskCollectionHeaderParam & - TaskUpdateTaskCollectionBodyParam & - RequestParameters; +export interface CreateNodeUserMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type CreateNodeUserParameters = CreateNodeUserQueryParam & + CreateNodeUserHeaderParam & + CreateNodeUserMediaTypesParam & + CreateNodeUserBodyParam & + RequestParameters; -export interface TaskListSubtasksHeaders { +export interface DeleteNodeUserHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3169,29 +3297,27 @@ export interface TaskListSubtasksHeaders { "ocp-date"?: string; } -export interface TaskListSubtasksQueryParamProperties { +export interface DeleteNodeUserQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; - /** An OData $select clause. */ - $select?: string; } -export interface TaskListSubtasksQueryParam { - queryParameters?: TaskListSubtasksQueryParamProperties; +export interface DeleteNodeUserQueryParam { + queryParameters?: DeleteNodeUserQueryParamProperties; } -export interface TaskListSubtasksHeaderParam { - headers?: RawHttpHeadersInput & TaskListSubtasksHeaders; +export interface DeleteNodeUserHeaderParam { + headers?: RawHttpHeadersInput & DeleteNodeUserHeaders; } -export type TaskListSubtasksParameters = TaskListSubtasksQueryParam & - TaskListSubtasksHeaderParam & +export type DeleteNodeUserParameters = DeleteNodeUserQueryParam & + DeleteNodeUserHeaderParam & RequestParameters; -export interface TaskTerminateTaskCollectionHeaders { +export interface ReplaceNodeUserHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3205,33 +3331,14 @@ export interface TaskTerminateTaskCollectionHeaders { * directly. */ "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. 
- */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; } -export interface TaskTerminateTaskCollectionQueryParamProperties { +export interface ReplaceNodeUserBodyParam { + /** The options to use for updating the user. */ + body: BatchNodeUserUpdateOptions; +} + +export interface ReplaceNodeUserQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -3239,20 +3346,26 @@ export interface TaskTerminateTaskCollectionQueryParamProperties { timeOut?: number; } -export interface TaskTerminateTaskCollectionQueryParam { - queryParameters?: TaskTerminateTaskCollectionQueryParamProperties; +export interface ReplaceNodeUserQueryParam { + queryParameters?: ReplaceNodeUserQueryParamProperties; } -export interface TaskTerminateTaskCollectionHeaderParam { - headers?: RawHttpHeadersInput & TaskTerminateTaskCollectionHeaders; +export interface ReplaceNodeUserHeaderParam { + headers?: RawHttpHeadersInput & ReplaceNodeUserHeaders; } -export type TaskTerminateTaskCollectionParameters = - TaskTerminateTaskCollectionQueryParam & - TaskTerminateTaskCollectionHeaderParam & - RequestParameters; +export interface ReplaceNodeUserMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type ReplaceNodeUserParameters = ReplaceNodeUserQueryParam & + ReplaceNodeUserHeaderParam & + ReplaceNodeUserMediaTypesParam & + ReplaceNodeUserBodyParam & + RequestParameters; -export interface TaskReactivateTaskCollectionHeaders { +export interface GetNodeHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3266,54 +3379,31 @@ export interface TaskReactivateTaskCollectionHeaders { * directly. */ "ocp-date"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service exactly matches the value specified by the client. - */ - "if-match"?: string; - /** - * An ETag value associated with the version of the resource known to the client. - * The operation will be performed only if the resource's current ETag on the - * service does not match the value specified by the client. - */ - "if-none-match"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * been modified since the specified time. - */ - "if-modified-since"?: string; - /** - * A timestamp indicating the last modified time of the resource known to the - * client. The operation will be performed only if the resource on the service has - * not been modified since the specified time. - */ - "if-unmodified-since"?: string; } -export interface TaskReactivateTaskCollectionQueryParamProperties { +export interface GetNodeQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** An OData $select clause. 
*/ + $select?: string[]; } -export interface TaskReactivateTaskCollectionQueryParam { - queryParameters?: TaskReactivateTaskCollectionQueryParamProperties; +export interface GetNodeQueryParam { + queryParameters?: GetNodeQueryParamProperties; } -export interface TaskReactivateTaskCollectionHeaderParam { - headers?: RawHttpHeadersInput & TaskReactivateTaskCollectionHeaders; +export interface GetNodeHeaderParam { + headers?: RawHttpHeadersInput & GetNodeHeaders; } -export type TaskReactivateTaskCollectionParameters = - TaskReactivateTaskCollectionQueryParam & - TaskReactivateTaskCollectionHeaderParam & - RequestParameters; +export type GetNodeParameters = GetNodeQueryParam & + GetNodeHeaderParam & + RequestParameters; -export interface ComputeNodesAddUserHeaders { +export interface RebootNodeHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3329,12 +3419,12 @@ export interface ComputeNodesAddUserHeaders { "ocp-date"?: string; } -export interface ComputeNodesAddUserBodyParam { - /** The user Account to be created. */ - body: ComputeNodeUser; +export interface RebootNodeBodyParam { + /** The options to use for rebooting the Compute Node. */ + body?: NodeRebootOptions; } -export interface ComputeNodesAddUserQueryParamProperties { +export interface RebootNodeQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -3342,20 +3432,26 @@ export interface ComputeNodesAddUserQueryParamProperties { timeOut?: number; } -export interface ComputeNodesAddUserQueryParam { - queryParameters?: ComputeNodesAddUserQueryParamProperties; +export interface RebootNodeQueryParam { + queryParameters?: RebootNodeQueryParamProperties; +} + +export interface RebootNodeHeaderParam { + headers?: RawHttpHeadersInput & RebootNodeHeaders; } -export interface ComputeNodesAddUserHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesAddUserHeaders; +export interface RebootNodeMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; } -export type ComputeNodesAddUserParameters = ComputeNodesAddUserQueryParam & - ComputeNodesAddUserHeaderParam & - ComputeNodesAddUserBodyParam & +export type RebootNodeParameters = RebootNodeQueryParam & + RebootNodeHeaderParam & + RebootNodeMediaTypesParam & + RebootNodeBodyParam & RequestParameters; -export interface ComputeNodesDeleteUserHeaders { +export interface ReimageNodeHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3371,7 +3467,12 @@ export interface ComputeNodesDeleteUserHeaders { "ocp-date"?: string; } -export interface ComputeNodesDeleteUserQueryParamProperties { +export interface ReimageNodeBodyParam { + /** The options to use for reimaging the Compute Node. */ + body?: NodeReimageOptions; +} + +export interface ReimageNodeQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
@@ -3379,20 +3480,26 @@ export interface ComputeNodesDeleteUserQueryParamProperties { timeOut?: number; } -export interface ComputeNodesDeleteUserQueryParam { - queryParameters?: ComputeNodesDeleteUserQueryParamProperties; +export interface ReimageNodeQueryParam { + queryParameters?: ReimageNodeQueryParamProperties; } -export interface ComputeNodesDeleteUserHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesDeleteUserHeaders; +export interface ReimageNodeHeaderParam { + headers?: RawHttpHeadersInput & ReimageNodeHeaders; } -export type ComputeNodesDeleteUserParameters = - ComputeNodesDeleteUserQueryParam & - ComputeNodesDeleteUserHeaderParam & - RequestParameters; +export interface ReimageNodeMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type ReimageNodeParameters = ReimageNodeQueryParam & + ReimageNodeHeaderParam & + ReimageNodeMediaTypesParam & + ReimageNodeBodyParam & + RequestParameters; -export interface ComputeNodesUpdateUserHeaders { +export interface DisableNodeSchedulingHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3408,12 +3515,12 @@ export interface ComputeNodesUpdateUserHeaders { "ocp-date"?: string; } -export interface ComputeNodesUpdateUserBodyParam { - /** The parameters for the request. */ - body: NodeUpdateUserParameters; +export interface DisableNodeSchedulingBodyParam { + /** The options to use for disabling scheduling on the Compute Node. */ + body?: NodeDisableSchedulingOptions; } -export interface ComputeNodesUpdateUserQueryParamProperties { +export interface DisableNodeSchedulingQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -3421,21 +3528,26 @@ export interface ComputeNodesUpdateUserQueryParamProperties { timeOut?: number; } -export interface ComputeNodesUpdateUserQueryParam { - queryParameters?: ComputeNodesUpdateUserQueryParamProperties; +export interface DisableNodeSchedulingQueryParam { + queryParameters?: DisableNodeSchedulingQueryParamProperties; } -export interface ComputeNodesUpdateUserHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesUpdateUserHeaders; +export interface DisableNodeSchedulingHeaderParam { + headers?: RawHttpHeadersInput & DisableNodeSchedulingHeaders; } -export type ComputeNodesUpdateUserParameters = - ComputeNodesUpdateUserQueryParam & - ComputeNodesUpdateUserHeaderParam & - ComputeNodesUpdateUserBodyParam & - RequestParameters; +export interface DisableNodeSchedulingMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type DisableNodeSchedulingParameters = DisableNodeSchedulingQueryParam & + DisableNodeSchedulingHeaderParam & + DisableNodeSchedulingMediaTypesParam & + DisableNodeSchedulingBodyParam & + RequestParameters; -export interface ComputeNodesGetComputeNodeHeaders { +export interface EnableNodeSchedulingHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3451,30 +3563,27 @@ export interface ComputeNodesGetComputeNodeHeaders { "ocp-date"?: string; } -export interface ComputeNodesGetComputeNodeQueryParamProperties { +export interface EnableNodeSchedulingQueryParamProperties { /** * The maximum number of items to return in the response. 
A maximum of 1000 * applications can be returned. */ timeOut?: number; - /** An OData $select clause. */ - $select?: string; } -export interface ComputeNodesGetComputeNodeQueryParam { - queryParameters?: ComputeNodesGetComputeNodeQueryParamProperties; +export interface EnableNodeSchedulingQueryParam { + queryParameters?: EnableNodeSchedulingQueryParamProperties; } -export interface ComputeNodesGetComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesGetComputeNodeHeaders; +export interface EnableNodeSchedulingHeaderParam { + headers?: RawHttpHeadersInput & EnableNodeSchedulingHeaders; } -export type ComputeNodesGetComputeNodeParameters = - ComputeNodesGetComputeNodeQueryParam & - ComputeNodesGetComputeNodeHeaderParam & - RequestParameters; +export type EnableNodeSchedulingParameters = EnableNodeSchedulingQueryParam & + EnableNodeSchedulingHeaderParam & + RequestParameters; -export interface ComputeNodesRebootComputeNodeHeaders { +export interface GetNodeRemoteLoginSettingsHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3490,12 +3599,7 @@ export interface ComputeNodesRebootComputeNodeHeaders { "ocp-date"?: string; } -export interface ComputeNodesRebootComputeNodeBodyParam { - /** The parameters for the request. */ - body?: NodeRebootParameters; -} - -export interface ComputeNodesRebootComputeNodeQueryParamProperties { +export interface GetNodeRemoteLoginSettingsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -3503,21 +3607,20 @@ export interface ComputeNodesRebootComputeNodeQueryParamProperties { timeOut?: number; } -export interface ComputeNodesRebootComputeNodeQueryParam { - queryParameters?: ComputeNodesRebootComputeNodeQueryParamProperties; +export interface GetNodeRemoteLoginSettingsQueryParam { + queryParameters?: GetNodeRemoteLoginSettingsQueryParamProperties; } -export interface ComputeNodesRebootComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesRebootComputeNodeHeaders; +export interface GetNodeRemoteLoginSettingsHeaderParam { + headers?: RawHttpHeadersInput & GetNodeRemoteLoginSettingsHeaders; } -export type ComputeNodesRebootComputeNodeParameters = - ComputeNodesRebootComputeNodeQueryParam & - ComputeNodesRebootComputeNodeHeaderParam & - ComputeNodesRebootComputeNodeBodyParam & +export type GetNodeRemoteLoginSettingsParameters = + GetNodeRemoteLoginSettingsQueryParam & + GetNodeRemoteLoginSettingsHeaderParam & RequestParameters; -export interface ComputeNodesReimageComputeNodeHeaders { +export interface GetNodeRemoteDesktopFileHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3533,12 +3636,7 @@ export interface ComputeNodesReimageComputeNodeHeaders { "ocp-date"?: string; } -export interface ComputeNodesReimageComputeNodeBodyParam { - /** The parameters for the request. */ - body?: NodeReimageParameters; -} - -export interface ComputeNodesReimageComputeNodeQueryParamProperties { +export interface GetNodeRemoteDesktopFileQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
@@ -3546,21 +3644,20 @@ export interface ComputeNodesReimageComputeNodeQueryParamProperties { timeOut?: number; } -export interface ComputeNodesReimageComputeNodeQueryParam { - queryParameters?: ComputeNodesReimageComputeNodeQueryParamProperties; +export interface GetNodeRemoteDesktopFileQueryParam { + queryParameters?: GetNodeRemoteDesktopFileQueryParamProperties; } -export interface ComputeNodesReimageComputeNodeHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesReimageComputeNodeHeaders; +export interface GetNodeRemoteDesktopFileHeaderParam { + headers?: RawHttpHeadersInput & GetNodeRemoteDesktopFileHeaders; } -export type ComputeNodesReimageComputeNodeParameters = - ComputeNodesReimageComputeNodeQueryParam & - ComputeNodesReimageComputeNodeHeaderParam & - ComputeNodesReimageComputeNodeBodyParam & +export type GetNodeRemoteDesktopFileParameters = + GetNodeRemoteDesktopFileQueryParam & + GetNodeRemoteDesktopFileHeaderParam & RequestParameters; -export interface ComputeNodesDisableSchedulingHeaders { +export interface UploadNodeLogsHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3576,12 +3673,12 @@ export interface ComputeNodesDisableSchedulingHeaders { "ocp-date"?: string; } -export interface ComputeNodesDisableSchedulingBodyParam { - /** The parameters for the request. */ - body?: NodeDisableSchedulingParameters; +export interface UploadNodeLogsBodyParam { + /** The Azure Batch service log files upload options. */ + body: UploadBatchServiceLogsOptions; } -export interface ComputeNodesDisableSchedulingQueryParamProperties { +export interface UploadNodeLogsQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. @@ -3589,21 +3686,32 @@ export interface ComputeNodesDisableSchedulingQueryParamProperties { timeOut?: number; } -export interface ComputeNodesDisableSchedulingQueryParam { - queryParameters?: ComputeNodesDisableSchedulingQueryParamProperties; +export interface UploadNodeLogsQueryParam { + queryParameters?: UploadNodeLogsQueryParamProperties; } -export interface ComputeNodesDisableSchedulingHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesDisableSchedulingHeaders; +export interface UploadNodeLogsHeaderParam { + headers?: RawHttpHeadersInput & UploadNodeLogsHeaders; } -export type ComputeNodesDisableSchedulingParameters = - ComputeNodesDisableSchedulingQueryParam & - ComputeNodesDisableSchedulingHeaderParam & - ComputeNodesDisableSchedulingBodyParam & - RequestParameters; +export interface UploadNodeLogsMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} -export interface ComputeNodesEnableSchedulingHeaders { +export type UploadNodeLogsParameters = UploadNodeLogsQueryParam & + UploadNodeLogsHeaderParam & + UploadNodeLogsMediaTypesParam & + UploadNodeLogsBodyParam & + RequestParameters; + +export interface ListNodesHeaders { + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
@@ -3611,36 +3719,41 @@ export interface ComputeNodesEnableSchedulingHeaders { "client-request-id"?: string; /** Whether the server should return the client-request-id in the response. */ "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; } -export interface ComputeNodesEnableSchedulingQueryParamProperties { +export interface ListNodesQueryParamProperties { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + */ + $filter?: string; + /** An OData $select clause. */ + $select?: string[]; } -export interface ComputeNodesEnableSchedulingQueryParam { - queryParameters?: ComputeNodesEnableSchedulingQueryParamProperties; +export interface ListNodesQueryParam { + queryParameters?: ListNodesQueryParamProperties; } -export interface ComputeNodesEnableSchedulingHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesEnableSchedulingHeaders; +export interface ListNodesHeaderParam { + headers?: RawHttpHeadersInput & ListNodesHeaders; } -export type ComputeNodesEnableSchedulingParameters = - ComputeNodesEnableSchedulingQueryParam & - ComputeNodesEnableSchedulingHeaderParam & - RequestParameters; +export type ListNodesParameters = ListNodesQueryParam & + ListNodesHeaderParam & + RequestParameters; -export interface ComputeNodesGetRemoteLoginSettingsHeaders { +export interface GetNodeExtensionHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3656,28 +3769,35 @@ export interface ComputeNodesGetRemoteLoginSettingsHeaders { "ocp-date"?: string; } -export interface ComputeNodesGetRemoteLoginSettingsQueryParamProperties { +export interface GetNodeExtensionQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** An OData $select clause. */ + $select?: string[]; } -export interface ComputeNodesGetRemoteLoginSettingsQueryParam { - queryParameters?: ComputeNodesGetRemoteLoginSettingsQueryParamProperties; +export interface GetNodeExtensionQueryParam { + queryParameters?: GetNodeExtensionQueryParamProperties; } -export interface ComputeNodesGetRemoteLoginSettingsHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesGetRemoteLoginSettingsHeaders; +export interface GetNodeExtensionHeaderParam { + headers?: RawHttpHeadersInput & GetNodeExtensionHeaders; } -export type ComputeNodesGetRemoteLoginSettingsParameters = - ComputeNodesGetRemoteLoginSettingsQueryParam & - ComputeNodesGetRemoteLoginSettingsHeaderParam & - RequestParameters; +export type GetNodeExtensionParameters = GetNodeExtensionQueryParam & + GetNodeExtensionHeaderParam & + RequestParameters; -export interface ComputeNodesGetRemoteDesktopHeaders { +export interface ListNodeExtensionsHeaders { + /** + * The time the request was issued. 
Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3685,36 +3805,36 @@ export interface ComputeNodesGetRemoteDesktopHeaders { "client-request-id"?: string; /** Whether the server should return the client-request-id in the response. */ "return-client-request-id"?: boolean; - /** - * The time the request was issued. Client libraries typically set this to the - * current system clock time; set it explicitly if you are calling the REST API - * directly. - */ - "ocp-date"?: string; } -export interface ComputeNodesGetRemoteDesktopQueryParamProperties { +export interface ListNodeExtensionsQueryParamProperties { + /** + * The maximum number of items to return in the response. A maximum of 1000 + * applications can be returned. + */ + maxresults?: number; /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** An OData $select clause. */ + $select?: string[]; } -export interface ComputeNodesGetRemoteDesktopQueryParam { - queryParameters?: ComputeNodesGetRemoteDesktopQueryParamProperties; +export interface ListNodeExtensionsQueryParam { + queryParameters?: ListNodeExtensionsQueryParamProperties; } -export interface ComputeNodesGetRemoteDesktopHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesGetRemoteDesktopHeaders; +export interface ListNodeExtensionsHeaderParam { + headers?: RawHttpHeadersInput & ListNodeExtensionsHeaders; } -export type ComputeNodesGetRemoteDesktopParameters = - ComputeNodesGetRemoteDesktopQueryParam & - ComputeNodesGetRemoteDesktopHeaderParam & - RequestParameters; +export type ListNodeExtensionsParameters = ListNodeExtensionsQueryParam & + ListNodeExtensionsHeaderParam & + RequestParameters; -export interface ComputeNodesUploadBatchServiceLogsHeaders { +export interface DeleteNodeFileHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3730,34 +3850,41 @@ export interface ComputeNodesUploadBatchServiceLogsHeaders { "ocp-date"?: string; } -export interface ComputeNodesUploadBatchServiceLogsBodyParam { - /** The Azure Batch service log files upload configuration. */ - body: UploadBatchServiceLogsConfiguration; -} - -export interface ComputeNodesUploadBatchServiceLogsQueryParamProperties { +export interface DeleteNodeFileQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; + /** + * Whether to delete children of a directory. If the filePath parameter represents + * a directory instead of a file, you can set recursive to true to delete the + * directory and all of the files and subdirectories in it. If recursive is false + * then the directory must be empty or deletion will fail. 
+ */ + recursive?: boolean; } -export interface ComputeNodesUploadBatchServiceLogsQueryParam { - queryParameters?: ComputeNodesUploadBatchServiceLogsQueryParamProperties; +export interface DeleteNodeFileQueryParam { + queryParameters?: DeleteNodeFileQueryParamProperties; } -export interface ComputeNodesUploadBatchServiceLogsHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesUploadBatchServiceLogsHeaders; +export interface DeleteNodeFileHeaderParam { + headers?: RawHttpHeadersInput & DeleteNodeFileHeaders; } -export type ComputeNodesUploadBatchServiceLogsParameters = - ComputeNodesUploadBatchServiceLogsQueryParam & - ComputeNodesUploadBatchServiceLogsHeaderParam & - ComputeNodesUploadBatchServiceLogsBodyParam & - RequestParameters; +export type DeleteNodeFileParameters = DeleteNodeFileQueryParam & + DeleteNodeFileHeaderParam & + RequestParameters; -export interface ComputeNodesListHeaders { +export interface GetNodeFileHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -3765,47 +3892,45 @@ export interface ComputeNodesListHeaders { */ "ocp-date"?: string; /** - * The caller-generated request identity, in the form of a GUID with no decoration - * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. */ - "client-request-id"?: string; - /** Whether the server should return the client-request-id in the response. */ - "return-client-request-id"?: boolean; -} - -export interface ComputeNodesListQueryParamProperties { + "if-modified-since"?: string; /** - * The maximum number of items to return in the response. A maximum of 1000 - * applications can be returned. + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. */ - maxresults?: number; + "if-unmodified-since"?: string; + /** + * The byte range to be retrieved. The default is to retrieve the entire file. The + * format is bytes=startRange-endRange. + */ + "ocp-range"?: string; +} + +export interface GetNodeFileQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; - /** - * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. - */ - $filter?: string; - /** An OData $select clause. 
*/ - $select?: string; } -export interface ComputeNodesListQueryParam { - queryParameters?: ComputeNodesListQueryParamProperties; +export interface GetNodeFileQueryParam { + queryParameters?: GetNodeFileQueryParamProperties; } -export interface ComputeNodesListHeaderParam { - headers?: RawHttpHeadersInput & ComputeNodesListHeaders; +export interface GetNodeFileHeaderParam { + headers?: RawHttpHeadersInput & GetNodeFileHeaders; } -export type ComputeNodesListParameters = ComputeNodesListQueryParam & - ComputeNodesListHeaderParam & +export type GetNodeFileParameters = GetNodeFileQueryParam & + GetNodeFileHeaderParam & RequestParameters; -export interface ComputeNodeExtensionsGetComputeNodeExtensionsHeaders { +export interface GetNodeFilePropertiesHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. @@ -3819,33 +3944,41 @@ export interface ComputeNodeExtensionsGetComputeNodeExtensionsHeaders { * directly. */ "ocp-date"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * been modified since the specified time. + */ + "if-modified-since"?: string; + /** + * A timestamp indicating the last modified time of the resource known to the + * client. The operation will be performed only if the resource on the service has + * not been modified since the specified time. + */ + "if-unmodified-since"?: string; } -export interface ComputeNodeExtensionsGetComputeNodeExtensionsQueryParamProperties { +export interface GetNodeFilePropertiesQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. */ timeOut?: number; - /** An OData $select clause. */ - $select?: string; } -export interface ComputeNodeExtensionsGetComputeNodeExtensionsQueryParam { - queryParameters?: ComputeNodeExtensionsGetComputeNodeExtensionsQueryParamProperties; +export interface GetNodeFilePropertiesQueryParam { + queryParameters?: GetNodeFilePropertiesQueryParamProperties; } -export interface ComputeNodeExtensionsGetComputeNodeExtensionsHeaderParam { - headers?: RawHttpHeadersInput & - ComputeNodeExtensionsGetComputeNodeExtensionsHeaders; +export interface GetNodeFilePropertiesHeaderParam { + headers?: RawHttpHeadersInput & GetNodeFilePropertiesHeaders; } -export type ComputeNodeExtensionsGetComputeNodeExtensionsParameters = - ComputeNodeExtensionsGetComputeNodeExtensionsQueryParam & - ComputeNodeExtensionsGetComputeNodeExtensionsHeaderParam & - RequestParameters; +export type GetNodeFilePropertiesParameters = GetNodeFilePropertiesQueryParam & + GetNodeFilePropertiesHeaderParam & + RequestParameters; -export interface ComputeNodeExtensionsListComputeNodeExtensionsHeaders { +export interface ListNodeFilesHeaders { /** * The time the request was issued. Client libraries typically set this to the * current system clock time; set it explicitly if you are calling the REST API @@ -3861,7 +3994,7 @@ export interface ComputeNodeExtensionsListComputeNodeExtensionsHeaders { "return-client-request-id"?: boolean; } -export interface ComputeNodeExtensionsListComputeNodeExtensionsQueryParamProperties { +export interface ListNodeFilesQueryParamProperties { /** * The maximum number of items to return in the response. A maximum of 1000 * applications can be returned. 
@@ -3872,20 +4005,23 @@ export interface ComputeNodeExtensionsListComputeNodeExtensionsQueryParamPropert * applications can be returned. */ timeOut?: number; - /** An OData $select clause. */ - $select?: string; + /** + * An OData $filter clause. For more information on constructing this filter, see + * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + */ + $filter?: string; + /** Whether to list children of a directory. */ + recursive?: boolean; } -export interface ComputeNodeExtensionsListComputeNodeExtensionsQueryParam { - queryParameters?: ComputeNodeExtensionsListComputeNodeExtensionsQueryParamProperties; +export interface ListNodeFilesQueryParam { + queryParameters?: ListNodeFilesQueryParamProperties; } -export interface ComputeNodeExtensionsListComputeNodeExtensionsHeaderParam { - headers?: RawHttpHeadersInput & - ComputeNodeExtensionsListComputeNodeExtensionsHeaders; +export interface ListNodeFilesHeaderParam { + headers?: RawHttpHeadersInput & ListNodeFilesHeaders; } -export type ComputeNodeExtensionsListComputeNodeExtensionsParameters = - ComputeNodeExtensionsListComputeNodeExtensionsQueryParam & - ComputeNodeExtensionsListComputeNodeExtensionsHeaderParam & - RequestParameters; +export type ListNodeFilesParameters = ListNodeFilesQueryParam & + ListNodeFilesHeaderParam & + RequestParameters; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/responses.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/responses.ts index f23581f3d5..be7a5c5a64 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/responses.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/responses.ts @@ -2,40 +2,39 @@ // Licensed under the MIT license. import { RawHttpHeaders } from "@azure/core-rest-pipeline"; -import { HttpResponse, ErrorResponse } from "@azure-rest/core-client"; +import { HttpResponse } from "@azure-rest/core-client"; import { ApplicationListResultOutput, - ApplicationOutput, - PoolUsageMetricsListOutput, - PoolStatisticsOutput, + BatchErrorOutput, + BatchApplicationOutput, + PoolListUsageMetricsResultOutput, BatchPoolListResultOutput, BatchPoolOutput, AutoScaleRunOutput, AccountListSupportedImagesResultOutput, PoolNodeCountsListResultOutput, - JobStatisticsOutput, BatchJobOutput, BatchJobListResultOutput, BatchJobListPreparationAndReleaseTaskStatusResultOutput, TaskCountsResultOutput, CertificateListResultOutput, - CertificateOutput, - NodeFileListResultOutput, + BatchCertificateOutput, BatchJobScheduleOutput, BatchJobScheduleListResultOutput, BatchTaskListResultOutput, TaskAddCollectionResultOutput, BatchTaskOutput, BatchTaskListSubtasksResultOutput, - ComputeNodeOutput, - ComputeNodeGetRemoteLoginSettingsResultOutput, + NodeFileListResultOutput, + BatchNodeOutput, + BatchNodeRemoteLoginSettingsResultOutput, UploadBatchServiceLogsResultOutput, - ComputeNodeListResultOutput, + BatchNodeListResultOutput, NodeVMExtensionOutput, NodeVMExtensionListOutput, } from "./outputModels.js"; -export interface ApplicationsListApplications200Headers { +export interface ListApplications200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. 
If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -47,59 +46,41 @@ export interface ApplicationsListApplications200Headers { } /** The request has succeeded. */ -export interface ApplicationsListApplications200Response extends HttpResponse { +export interface ListApplications200Response extends HttpResponse { status: "200"; body: ApplicationListResultOutput; - headers: RawHttpHeaders & ApplicationsListApplications200Headers; -} - -export interface ApplicationsListApplicationsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ListApplications200Headers; } -export interface ApplicationsListApplicationsDefaultResponse - extends HttpResponse { +export interface ListApplicationsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ApplicationsListApplicationsDefaultHeaders; -} - -/** The request has succeeded. */ -export interface ApplicationsGet200Response extends HttpResponse { - status: "200"; - body: ApplicationOutput; -} - -export interface ApplicationsGetDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: BatchErrorOutput; } -export interface ApplicationsGetDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ApplicationsGetDefaultHeaders; +export interface GetApplication200Headers { + /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ + "client-request-id"?: string; + /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ + "request-id"?: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; } /** The request has succeeded. */ -export interface PoolListUsageMetrics200Response extends HttpResponse { +export interface GetApplication200Response extends HttpResponse { status: "200"; - body: PoolUsageMetricsListOutput; + body: BatchApplicationOutput; + headers: RawHttpHeaders & GetApplication200Headers; } -export interface PoolListUsageMetricsDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface PoolListUsageMetricsDefaultResponse extends HttpResponse { +export interface GetApplicationDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolListUsageMetricsDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolGetAllPoolLifetimeStatistics200Headers { +export interface ListPoolUsageMetrics200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -111,26 +92,18 @@ export interface PoolGetAllPoolLifetimeStatistics200Headers { } /** The request has succeeded. */ -export interface PoolGetAllPoolLifetimeStatistics200Response - extends HttpResponse { +export interface ListPoolUsageMetrics200Response extends HttpResponse { status: "200"; - body: PoolStatisticsOutput; - headers: RawHttpHeaders & PoolGetAllPoolLifetimeStatistics200Headers; -} - -export interface PoolGetAllPoolLifetimeStatisticsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: PoolListUsageMetricsResultOutput; + headers: RawHttpHeaders & ListPoolUsageMetrics200Headers; } -export interface PoolGetAllPoolLifetimeStatisticsDefaultResponse - extends HttpResponse { +export interface ListPoolUsageMetricsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolGetAllPoolLifetimeStatisticsDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolAddPool201Headers { +export interface CreatePool201Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -139,28 +112,22 @@ export interface PoolAddPool201Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ + /** The OData ID of the resource to which the request applied */ dataserviceid: string; } /** The request has succeeded and a new resource has been created as a result. */ -export interface PoolAddPool201Response extends HttpResponse { +export interface CreatePool201Response extends HttpResponse { status: "201"; - headers: RawHttpHeaders & PoolAddPool201Headers; + headers: RawHttpHeaders & CreatePool201Headers; } -export interface PoolAddPoolDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface PoolAddPoolDefaultResponse extends HttpResponse { +export interface CreatePoolDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolAddPoolDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolListPools200Headers { +export interface ListPools200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -172,45 +139,33 @@ export interface PoolListPools200Headers { } /** The request has succeeded. */ -export interface PoolListPools200Response extends HttpResponse { +export interface ListPools200Response extends HttpResponse { status: "200"; body: BatchPoolListResultOutput; - headers: RawHttpHeaders & PoolListPools200Headers; -} - -export interface PoolListPoolsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ListPools200Headers; } -export interface PoolListPoolsDefaultResponse extends HttpResponse { +export interface ListPoolsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolListPoolsDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolDeletePool202Headers { +export interface DeletePool202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; } -/** The parameters for a widget status request */ -export interface PoolDeletePool202Response extends HttpResponse { +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface DeletePool202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & PoolDeletePool202Headers; -} - -export interface PoolDeletePoolDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & DeletePool202Headers; } -export interface PoolDeletePoolDefaultResponse extends HttpResponse { +export interface DeletePoolDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolDeletePoolDefaultHeaders; + body: BatchErrorOutput; } export interface PoolExists200Headers { @@ -230,23 +185,17 @@ export interface PoolExists200Response extends HttpResponse { headers: RawHttpHeaders & PoolExists200Headers; } -/** The Pool does not exist. 
*/ +/** The server cannot find the requested resource. */ export interface PoolExists404Response extends HttpResponse { status: "404"; } -export interface PoolExistsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - export interface PoolExistsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolExistsDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolGetPool200Headers { +export interface GetPool200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -258,24 +207,18 @@ export interface PoolGetPool200Headers { } /** The request has succeeded. */ -export interface PoolGetPool200Response extends HttpResponse { +export interface GetPool200Response extends HttpResponse { status: "200"; body: BatchPoolOutput; - headers: RawHttpHeaders & PoolGetPool200Headers; -} - -export interface PoolGetPoolDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & GetPool200Headers; } -export interface PoolGetPoolDefaultResponse extends HttpResponse { +export interface GetPoolDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolGetPoolDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolPatchPool200Headers { +export interface UpdatePool200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -289,23 +232,17 @@ export interface PoolPatchPool200Headers { } /** The request has succeeded. */ -export interface PoolPatchPool200Response extends HttpResponse { +export interface UpdatePool200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & PoolPatchPool200Headers; + headers: RawHttpHeaders & UpdatePool200Headers; } -export interface PoolPatchPoolDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolPatchPoolDefaultResponse extends HttpResponse { +export interface UpdatePoolDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolPatchPoolDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolDisableAutoScale200Headers { +export interface DisablePoolAutoScale200Headers { /** The client-request-id provided by the client during the request. 
This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -319,23 +256,17 @@ export interface PoolDisableAutoScale200Headers { } /** The request has succeeded. */ -export interface PoolDisableAutoScale200Response extends HttpResponse { +export interface DisablePoolAutoScale200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & PoolDisableAutoScale200Headers; -} - -export interface PoolDisableAutoScaleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & DisablePoolAutoScale200Headers; } -export interface PoolDisableAutoScaleDefaultResponse extends HttpResponse { +export interface DisablePoolAutoScaleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolDisableAutoScaleDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolEnableAutoScale200Headers { +export interface EnablePoolAutoScale200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -349,23 +280,17 @@ export interface PoolEnableAutoScale200Headers { } /** The request has succeeded. */ -export interface PoolEnableAutoScale200Response extends HttpResponse { +export interface EnablePoolAutoScale200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & PoolEnableAutoScale200Headers; + headers: RawHttpHeaders & EnablePoolAutoScale200Headers; } -export interface PoolEnableAutoScaleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolEnableAutoScaleDefaultResponse extends HttpResponse { +export interface EnablePoolAutoScaleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolEnableAutoScaleDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolEvaluateAutoScale200Headers { +export interface EvaluatePoolAutoScale200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -379,24 +304,18 @@ export interface PoolEvaluateAutoScale200Headers { } /** The request has succeeded. */ -export interface PoolEvaluateAutoScale200Response extends HttpResponse { +export interface EvaluatePoolAutoScale200Response extends HttpResponse { status: "200"; body: AutoScaleRunOutput; - headers: RawHttpHeaders & PoolEvaluateAutoScale200Headers; -} - -export interface PoolEvaluateAutoScaleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & EvaluatePoolAutoScale200Headers; } -export interface PoolEvaluateAutoScaleDefaultResponse extends HttpResponse { +export interface EvaluatePoolAutoScaleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolEvaluateAutoScaleDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolResize202Headers { +export interface ResizePool202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -410,23 +329,17 @@ export interface PoolResize202Headers { } /** The request has been accepted for processing, but processing has not yet completed. */ -export interface PoolResize202Response extends HttpResponse { +export interface ResizePool202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & PoolResize202Headers; + headers: RawHttpHeaders & ResizePool202Headers; } -export interface PoolResizeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface PoolResizeDefaultResponse extends HttpResponse { +export interface ResizePoolDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolResizeDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolStopResize202Headers { +export interface StopPoolResize202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -439,24 +352,18 @@ export interface PoolStopResize202Headers { dataserviceid: string; } -/** The request to the Batch service was successful. 
*/ -export interface PoolStopResize202Response extends HttpResponse { +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface StopPoolResize202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & PoolStopResize202Headers; -} - -export interface PoolStopResizeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & StopPoolResize202Headers; } -export interface PoolStopResizeDefaultResponse extends HttpResponse { +export interface StopPoolResizeDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolStopResizeDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolUpdateProperties204Headers { +export interface ReplacePoolProperties204Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -469,24 +376,18 @@ export interface PoolUpdateProperties204Headers { dataserviceid: string; } -/** The request to the Batch service was successful. */ -export interface PoolUpdateProperties204Response extends HttpResponse { +/** There is no content to send for this request, but the headers may be useful. */ +export interface ReplacePoolProperties204Response extends HttpResponse { status: "204"; - headers: RawHttpHeaders & PoolUpdateProperties204Headers; -} - -export interface PoolUpdatePropertiesDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ReplacePoolProperties204Headers; } -export interface PoolUpdatePropertiesDefaultResponse extends HttpResponse { +export interface ReplacePoolPropertiesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolUpdatePropertiesDefaultHeaders; + body: BatchErrorOutput; } -export interface PoolRemoveNodes202Headers { +export interface RemoveNodes202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -499,24 +400,18 @@ export interface PoolRemoveNodes202Headers { dataserviceid: string; } -/** The request to the Batch service was successful. */ -export interface PoolRemoveNodes202Response extends HttpResponse { +/** The request has been accepted for processing, but processing has not yet completed. 
*/ +export interface RemoveNodes202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & PoolRemoveNodes202Headers; -} - -export interface PoolRemoveNodesDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & RemoveNodes202Headers; } -export interface PoolRemoveNodesDefaultResponse extends HttpResponse { +export interface RemoveNodesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & PoolRemoveNodesDefaultHeaders; + body: BatchErrorOutput; } -export interface AccountListSupportedImages200Headers { +export interface ListSupportedImages200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -528,25 +423,18 @@ export interface AccountListSupportedImages200Headers { } /** The request has succeeded. */ -export interface AccountListSupportedImages200Response extends HttpResponse { +export interface ListSupportedImages200Response extends HttpResponse { status: "200"; body: AccountListSupportedImagesResultOutput; - headers: RawHttpHeaders & AccountListSupportedImages200Headers; -} - -export interface AccountListSupportedImagesDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ListSupportedImages200Headers; } -export interface AccountListSupportedImagesDefaultResponse - extends HttpResponse { +export interface ListSupportedImagesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & AccountListSupportedImagesDefaultHeaders; + body: BatchErrorOutput; } -export interface AccountListPoolNodeCounts200Headers { +export interface ListPoolNodeCounts200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -554,79 +442,36 @@ export interface AccountListPoolNodeCounts200Headers { } /** The request has succeeded. */ -export interface AccountListPoolNodeCounts200Response extends HttpResponse { +export interface ListPoolNodeCounts200Response extends HttpResponse { status: "200"; body: PoolNodeCountsListResultOutput; - headers: RawHttpHeaders & AccountListPoolNodeCounts200Headers; -} - -export interface AccountListPoolNodeCountsDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ListPoolNodeCounts200Headers; } -export interface AccountListPoolNodeCountsDefaultResponse extends HttpResponse { +export interface ListPoolNodeCountsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & AccountListPoolNodeCountsDefaultHeaders; + body: BatchErrorOutput; } -export interface JobGetAllJobLifetimeStatistics200Headers { - /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ - "client-request-id"?: string; - /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ - "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; -} - -/** The request has succeeded. */ -export interface JobGetAllJobLifetimeStatistics200Response - extends HttpResponse { - status: "200"; - body: JobStatisticsOutput; - headers: RawHttpHeaders & JobGetAllJobLifetimeStatistics200Headers; -} - -export interface JobGetAllJobLifetimeStatisticsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobGetAllJobLifetimeStatisticsDefaultResponse - extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobGetAllJobLifetimeStatisticsDefaultHeaders; -} - -export interface JobDeleteJob202Headers { +export interface DeleteJob202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; } -/** The parameters for a widget status request */ -export interface JobDeleteJob202Response extends HttpResponse { +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface DeleteJob202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & JobDeleteJob202Headers; -} - -export interface JobDeleteJobDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & DeleteJob202Headers; } -export interface JobDeleteJobDefaultResponse extends HttpResponse { +export interface DeleteJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobDeleteJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobGetJob200Headers { +export interface GetJob200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -638,24 +483,18 @@ export interface JobGetJob200Headers { } /** The request has succeeded. */ -export interface JobGetJob200Response extends HttpResponse { +export interface GetJob200Response extends HttpResponse { status: "200"; body: BatchJobOutput; - headers: RawHttpHeaders & JobGetJob200Headers; + headers: RawHttpHeaders & GetJob200Headers; } -export interface JobGetJobDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobGetJobDefaultResponse extends HttpResponse { +export interface GetJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobGetJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobPatchJob200Headers { +export interface UpdateJob200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -669,23 +508,17 @@ export interface JobPatchJob200Headers { } /** The request has succeeded. */ -export interface JobPatchJob200Response extends HttpResponse { +export interface UpdateJob200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & JobPatchJob200Headers; -} - -export interface JobPatchJobDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & UpdateJob200Headers; } -export interface JobPatchJobDefaultResponse extends HttpResponse { +export interface UpdateJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobPatchJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobUpdateJob200Headers { +export interface ReplaceJob200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. 
*/ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -699,23 +532,17 @@ export interface JobUpdateJob200Headers { } /** The request has succeeded. */ -export interface JobUpdateJob200Response extends HttpResponse { +export interface ReplaceJob200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & JobUpdateJob200Headers; + headers: RawHttpHeaders & ReplaceJob200Headers; } -export interface JobUpdateJobDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobUpdateJobDefaultResponse extends HttpResponse { +export interface ReplaceJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobUpdateJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobDisableJob202Headers { +export interface DisableJob202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -729,23 +556,17 @@ export interface JobDisableJob202Headers { } /** The request has been accepted for processing, but processing has not yet completed. */ -export interface JobDisableJob202Response extends HttpResponse { +export interface DisableJob202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & JobDisableJob202Headers; -} - -export interface JobDisableJobDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & DisableJob202Headers; } -export interface JobDisableJobDefaultResponse extends HttpResponse { +export interface DisableJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobDisableJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobEnableJob202Headers { +export interface EnableJob202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. 
*/ @@ -759,23 +580,17 @@ export interface JobEnableJob202Headers { } /** The request has been accepted for processing, but processing has not yet completed. */ -export interface JobEnableJob202Response extends HttpResponse { +export interface EnableJob202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & JobEnableJob202Headers; + headers: RawHttpHeaders & EnableJob202Headers; } -export interface JobEnableJobDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobEnableJobDefaultResponse extends HttpResponse { +export interface EnableJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobEnableJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobTerminateJob202Headers { +export interface TerminateJob202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -789,23 +604,17 @@ export interface JobTerminateJob202Headers { } /** The request has been accepted for processing, but processing has not yet completed. */ -export interface JobTerminateJob202Response extends HttpResponse { +export interface TerminateJob202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & JobTerminateJob202Headers; -} - -export interface JobTerminateJobDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & TerminateJob202Headers; } -export interface JobTerminateJobDefaultResponse extends HttpResponse { +export interface TerminateJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobTerminateJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobAddJob201Headers { +export interface CreateJob201Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -819,23 +628,17 @@ export interface JobAddJob201Headers { } /** The request has succeeded and a new resource has been created as a result. */ -export interface JobAddJob201Response extends HttpResponse { +export interface CreateJob201Response extends HttpResponse { status: "201"; - headers: RawHttpHeaders & JobAddJob201Headers; + headers: RawHttpHeaders & CreateJob201Headers; } -export interface JobAddJobDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; -} - -export interface JobAddJobDefaultResponse extends HttpResponse { +export interface CreateJobDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobAddJobDefaultHeaders; + body: BatchErrorOutput; } -export interface JobListJobs200Headers { +export interface ListJobs200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -847,24 +650,18 @@ export interface JobListJobs200Headers { } /** The request has succeeded. */ -export interface JobListJobs200Response extends HttpResponse { +export interface ListJobs200Response extends HttpResponse { status: "200"; body: BatchJobListResultOutput; - headers: RawHttpHeaders & JobListJobs200Headers; -} - -export interface JobListJobsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ListJobs200Headers; } -export interface JobListJobsDefaultResponse extends HttpResponse { +export interface ListJobsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobListJobsDefaultHeaders; + body: BatchErrorOutput; } -export interface JobListFromJobSchedule200Headers { +export interface ListJobsFromSchedule200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -876,24 +673,18 @@ export interface JobListFromJobSchedule200Headers { } /** The request has succeeded. */ -export interface JobListFromJobSchedule200Response extends HttpResponse { +export interface ListJobsFromSchedule200Response extends HttpResponse { status: "200"; body: BatchJobListResultOutput; - headers: RawHttpHeaders & JobListFromJobSchedule200Headers; + headers: RawHttpHeaders & ListJobsFromSchedule200Headers; } -export interface JobListFromJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobListFromJobScheduleDefaultResponse extends HttpResponse { +export interface ListJobsFromScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobListFromJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobListPreparationAndReleaseTaskStatus200Headers { +export interface ListJobPreparationAndReleaseTaskStatus200Headers { /** The client-request-id provided by the client during the request. 
This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -905,27 +696,20 @@ export interface JobListPreparationAndReleaseTaskStatus200Headers { } /** The request has succeeded. */ -export interface JobListPreparationAndReleaseTaskStatus200Response +export interface ListJobPreparationAndReleaseTaskStatus200Response extends HttpResponse { status: "200"; body: BatchJobListPreparationAndReleaseTaskStatusResultOutput; - headers: RawHttpHeaders & JobListPreparationAndReleaseTaskStatus200Headers; -} - -export interface JobListPreparationAndReleaseTaskStatusDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ListJobPreparationAndReleaseTaskStatus200Headers; } -export interface JobListPreparationAndReleaseTaskStatusDefaultResponse +export interface ListJobPreparationAndReleaseTaskStatusDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & - JobListPreparationAndReleaseTaskStatusDefaultHeaders; + body: BatchErrorOutput; } -export interface JobGetTaskCounts200Headers { +export interface GetJobTaskCounts200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -933,24 +717,18 @@ export interface JobGetTaskCounts200Headers { } /** The request has succeeded. */ -export interface JobGetTaskCounts200Response extends HttpResponse { +export interface GetJobTaskCounts200Response extends HttpResponse { status: "200"; body: TaskCountsResultOutput; - headers: RawHttpHeaders & JobGetTaskCounts200Headers; + headers: RawHttpHeaders & GetJobTaskCounts200Headers; } -export interface JobGetTaskCountsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface JobGetTaskCountsDefaultResponse extends HttpResponse { +export interface GetJobTaskCountsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobGetTaskCountsDefaultHeaders; + body: BatchErrorOutput; } -export interface CertificatesAddCertificate201Headers { +export interface CreateCertificate201Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. 
If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -959,27 +737,22 @@ export interface CertificatesAddCertificate201Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } /** The request has succeeded and a new resource has been created as a result. */ -export interface CertificatesAddCertificate201Response extends HttpResponse { +export interface CreateCertificate201Response extends HttpResponse { status: "201"; - headers: RawHttpHeaders & CertificatesAddCertificate201Headers; + headers: RawHttpHeaders & CreateCertificate201Headers; } -export interface CertificatesAddCertificateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface CertificatesAddCertificateDefaultResponse - extends HttpResponse { +export interface CreateCertificateDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificatesAddCertificateDefaultHeaders; + body: BatchErrorOutput; } -export interface CertificatesListCertificates200Headers { +export interface ListCertificates200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -991,25 +764,18 @@ export interface CertificatesListCertificates200Headers { } /** The request has succeeded. */ -export interface CertificatesListCertificates200Response extends HttpResponse { +export interface ListCertificates200Response extends HttpResponse { status: "200"; body: CertificateListResultOutput; - headers: RawHttpHeaders & CertificatesListCertificates200Headers; -} - -export interface CertificatesListCertificatesDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ListCertificates200Headers; } -export interface CertificatesListCertificatesDefaultResponse - extends HttpResponse { +export interface ListCertificatesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificatesListCertificatesDefaultHeaders; + body: BatchErrorOutput; } -export interface CertificatesCancelCertificateDeletion204Headers { +export interface CancelCertificateDeletion204Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. 
If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1023,25 +789,17 @@ export interface CertificatesCancelCertificateDeletion204Headers { } /** There is no content to send for this request, but the headers may be useful. */ -export interface CertificatesCancelCertificateDeletion204Response - extends HttpResponse { +export interface CancelCertificateDeletion204Response extends HttpResponse { status: "204"; - headers: RawHttpHeaders & CertificatesCancelCertificateDeletion204Headers; -} - -export interface CertificatesCancelCertificateDeletionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & CancelCertificateDeletion204Headers; } -export interface CertificatesCancelCertificateDeletionDefaultResponse - extends HttpResponse { +export interface CancelCertificateDeletionDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificatesCancelCertificateDeletionDefaultHeaders; + body: BatchErrorOutput; } -export interface CertificatesDeleteCertificate202Headers { +export interface DeleteCertificate202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1053,24 +811,17 @@ export interface CertificatesDeleteCertificate202Headers { } /** The request has been accepted for processing, but processing has not yet completed. */ -export interface CertificatesDeleteCertificate202Response extends HttpResponse { +export interface DeleteCertificate202Response extends HttpResponse { status: "202"; - headers: RawHttpHeaders & CertificatesDeleteCertificate202Headers; -} - -export interface CertificatesDeleteCertificateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & DeleteCertificate202Headers; } -export interface CertificatesDeleteCertificateDefaultResponse - extends HttpResponse { +export interface DeleteCertificateDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificatesDeleteCertificateDefaultHeaders; + body: BatchErrorOutput; } -export interface CertificatesGetCertificate200Headers { +export interface GetCertificate200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1082,89 +833,63 @@ export interface CertificatesGetCertificate200Headers { } /** The request has succeeded. */ -export interface CertificatesGetCertificate200Response extends HttpResponse { +export interface GetCertificate200Response extends HttpResponse { status: "200"; - body: CertificateOutput; - headers: RawHttpHeaders & CertificatesGetCertificate200Headers; -} - -export interface CertificatesGetCertificateDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: BatchCertificateOutput; + headers: RawHttpHeaders & GetCertificate200Headers; } -export interface CertificatesGetCertificateDefaultResponse - extends HttpResponse { +export interface GetCertificateDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & CertificatesGetCertificateDefaultHeaders; + body: BatchErrorOutput; } -export interface FileDeleteFromTask200Headers { +export interface JobScheduleExists200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; } /** The request has succeeded. */ -export interface FileDeleteFromTask200Response extends HttpResponse { +export interface JobScheduleExists200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & FileDeleteFromTask200Headers; + headers: RawHttpHeaders & JobScheduleExists200Headers; } -export interface FileDeleteFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The server cannot find the requested resource. */ +export interface JobScheduleExists404Response extends HttpResponse { + status: "404"; } -export interface FileDeleteFromTaskDefaultResponse extends HttpResponse { +export interface JobScheduleExistsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileDeleteFromTaskDefaultHeaders; + body: BatchErrorOutput; } -export interface FileGetFromTask200Headers { +export interface DeleteJobSchedule202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. 
If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; -} - -/** The request has succeeded. */ -export interface FileGetFromTask200Response extends HttpResponse { - status: "200"; - /** Value may contain any sequence of octets */ - body: Uint8Array; - headers: RawHttpHeaders & FileGetFromTask200Headers; } -export interface FileGetFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface DeleteJobSchedule202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & DeleteJobSchedule202Headers; } -export interface FileGetFromTaskDefaultResponse extends HttpResponse { +export interface DeleteJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetFromTaskDefaultHeaders; + body: BatchErrorOutput; } -export interface FileGetPropertiesFromTask200Headers { +export interface GetJobSchedule200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1173,60 +898,45 @@ export interface FileGetPropertiesFromTask200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; } /** The request has succeeded. 
*/ -export interface FileGetPropertiesFromTask200Response extends HttpResponse { +export interface GetJobSchedule200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & FileGetPropertiesFromTask200Headers; -} - -export interface FileGetPropertiesFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: BatchJobScheduleOutput; + headers: RawHttpHeaders & GetJobSchedule200Headers; } -export interface FileGetPropertiesFromTaskDefaultResponse extends HttpResponse { +export interface GetJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetPropertiesFromTaskDefaultHeaders; + body: BatchErrorOutput; } -export interface FileDeleteFromComputeNode200Headers { +export interface UpdateJobSchedule200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } /** The request has succeeded. */ -export interface FileDeleteFromComputeNode200Response extends HttpResponse { +export interface UpdateJobSchedule200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & FileDeleteFromComputeNode200Headers; -} - -export interface FileDeleteFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & UpdateJobSchedule200Headers; } -export interface FileDeleteFromComputeNodeDefaultResponse extends HttpResponse { +export interface UpdateJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileDeleteFromComputeNodeDefaultHeaders; + body: BatchErrorOutput; } -export interface FileGetFromComputeNode200Headers { +export interface ReplaceJobSchedule200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. 
*/ @@ -1235,38 +945,22 @@ export interface FileGetFromComputeNode200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } /** The request has succeeded. */ -export interface FileGetFromComputeNode200Response extends HttpResponse { +export interface ReplaceJobSchedule200Response extends HttpResponse { status: "200"; - /** Value may contain any sequence of octets */ - body: Uint8Array; - headers: RawHttpHeaders & FileGetFromComputeNode200Headers; -} - -export interface FileGetFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & ReplaceJobSchedule200Headers; } -export interface FileGetFromComputeNodeDefaultResponse extends HttpResponse { +export interface ReplaceJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetFromComputeNodeDefaultHeaders; + body: BatchErrorOutput; } -export interface FileGetPropertiesFromComputeNode200Headers { +export interface DisableJobSchedule204Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1275,38 +969,22 @@ export interface FileGetPropertiesFromComputeNode200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The file creation time. */ - "ocp-creation-time"?: string; - /** Whether the object represents a directory. */ - "ocp-batch-file-isdirectory": boolean; - /** The URL of the file. */ - "ocp-batch-file-url": string; - /** The file mode attribute in octal format. */ - "ocp-batch-file-mode": string; - /** The length of the file. */ - "content-length": number; -} - -/** The request has succeeded. */ -export interface FileGetPropertiesFromComputeNode200Response - extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & FileGetPropertiesFromComputeNode200Headers; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } -export interface FileGetPropertiesFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** There is no content to send for this request, but the headers may be useful. 
*/ +export interface DisableJobSchedule204Response extends HttpResponse { + status: "204"; + headers: RawHttpHeaders & DisableJobSchedule204Headers; } -export interface FileGetPropertiesFromComputeNodeDefaultResponse - extends HttpResponse { +export interface DisableJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileGetPropertiesFromComputeNodeDefaultHeaders; + body: BatchErrorOutput; } -export interface FileListFromTask200Headers { +export interface EnableJobSchedule204Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1315,27 +993,22 @@ export interface FileListFromTask200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } -/** The request has succeeded. */ -export interface FileListFromTask200Response extends HttpResponse { - status: "200"; - body: NodeFileListResultOutput; - headers: RawHttpHeaders & FileListFromTask200Headers; -} - -export interface FileListFromTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** There is no content to send for this request, but the headers may be useful. */ +export interface EnableJobSchedule204Response extends HttpResponse { + status: "204"; + headers: RawHttpHeaders & EnableJobSchedule204Headers; } -export interface FileListFromTaskDefaultResponse extends HttpResponse { +export interface EnableJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileListFromTaskDefaultHeaders; + body: BatchErrorOutput; } -export interface FileListFromComputeNode200Headers { +export interface TerminateJobSchedule202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1344,27 +1017,22 @@ export interface FileListFromComputeNode200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } -/** The request has succeeded. 
*/ -export interface FileListFromComputeNode200Response extends HttpResponse { - status: "200"; - body: NodeFileListResultOutput; - headers: RawHttpHeaders & FileListFromComputeNode200Headers; -} - -export interface FileListFromComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface TerminateJobSchedule202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & TerminateJobSchedule202Headers; } -export interface FileListFromComputeNodeDefaultResponse extends HttpResponse { +export interface TerminateJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & FileListFromComputeNodeDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleJobScheduleExists200Headers { +export interface CreateJobSchedule201Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1373,57 +1041,45 @@ export interface JobScheduleJobScheduleExists200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } -/** The request has succeeded. */ -export interface JobScheduleJobScheduleExists200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & JobScheduleJobScheduleExists200Headers; -} - -/** The server cannot find the requested resource. */ -export interface JobScheduleJobScheduleExists404Response extends HttpResponse { - status: "404"; -} - -export interface JobScheduleJobScheduleExistsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded and a new resource has been created as a result. */ +export interface CreateJobSchedule201Response extends HttpResponse { + status: "201"; + headers: RawHttpHeaders & CreateJobSchedule201Headers; } -export interface JobScheduleJobScheduleExistsDefaultResponse - extends HttpResponse { +export interface CreateJobScheduleDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleJobScheduleExistsDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleDeleteJobSchedule202Headers { +export interface ListJobSchedules200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; } -/** The parameters for a widget status request */ -export interface JobScheduleDeleteJobSchedule202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobScheduleDeleteJobSchedule202Headers; -} - -export interface JobScheduleDeleteJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface ListJobSchedules200Response extends HttpResponse { + status: "200"; + body: BatchJobScheduleListResultOutput; + headers: RawHttpHeaders & ListJobSchedules200Headers; } -export interface JobScheduleDeleteJobScheduleDefaultResponse - extends HttpResponse { +export interface ListJobSchedulesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleDeleteJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleGetJobSchedule200Headers { +export interface CreateTask201Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1432,27 +1088,22 @@ export interface JobScheduleGetJobSchedule200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } -/** The request has succeeded. */ -export interface JobScheduleGetJobSchedule200Response extends HttpResponse { - status: "200"; - body: BatchJobScheduleOutput; - headers: RawHttpHeaders & JobScheduleGetJobSchedule200Headers; -} - -export interface JobScheduleGetJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded and a new resource has been created as a result. */ +export interface CreateTask201Response extends HttpResponse { + status: "201"; + headers: RawHttpHeaders & CreateTask201Headers; } -export interface JobScheduleGetJobScheduleDefaultResponse extends HttpResponse { +export interface CreateTaskDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleGetJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobSchedulePatchJobSchedule200Headers { +export interface ListTasks200Headers { /** The client-request-id provided by the client during the request. 
This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1461,91 +1112,58 @@ export interface JobSchedulePatchJobSchedule200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; } /** The request has succeeded. */ -export interface JobSchedulePatchJobSchedule200Response extends HttpResponse { +export interface ListTasks200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & JobSchedulePatchJobSchedule200Headers; -} - -export interface JobSchedulePatchJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: BatchTaskListResultOutput; + headers: RawHttpHeaders & ListTasks200Headers; } -export interface JobSchedulePatchJobScheduleDefaultResponse - extends HttpResponse { +export interface ListTasksDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobSchedulePatchJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleUpdateJobSchedule200Headers { +export interface CreateTaskCollection200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; } /** The request has succeeded. */ -export interface JobScheduleUpdateJobSchedule200Response extends HttpResponse { +export interface CreateTaskCollection200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & JobScheduleUpdateJobSchedule200Headers; -} - -export interface JobScheduleUpdateJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; + body: TaskAddCollectionResultOutput; + headers: RawHttpHeaders & CreateTaskCollection200Headers; } -export interface JobScheduleUpdateJobScheduleDefaultResponse - extends HttpResponse { +export interface CreateTaskCollectionDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleUpdateJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleDisableJobSchedule204Headers { +export interface DeleteTask200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** There is no content to send for this request, but the headers may be useful. */ -export interface JobScheduleDisableJobSchedule204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & JobScheduleDisableJobSchedule204Headers; } -export interface JobScheduleDisableJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface DeleteTask200Response extends HttpResponse { + status: "200"; + headers: RawHttpHeaders & DeleteTask200Headers; } -export interface JobScheduleDisableJobScheduleDefaultResponse - extends HttpResponse { +export interface DeleteTaskDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleDisableJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleEnableJobSchedule204Headers { +export interface GetTask200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1558,25 +1176,19 @@ export interface JobScheduleEnableJobSchedule204Headers { dataserviceid: string; } -/** There is no content to send for this request, but the headers may be useful. 
*/ -export interface JobScheduleEnableJobSchedule204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & JobScheduleEnableJobSchedule204Headers; -} - -export interface JobScheduleEnableJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface GetTask200Response extends HttpResponse { + status: "200"; + body: BatchTaskOutput; + headers: RawHttpHeaders & GetTask200Headers; } -export interface JobScheduleEnableJobScheduleDefaultResponse - extends HttpResponse { +export interface GetTaskDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleEnableJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleTerminateJobSchedule202Headers { +export interface ReplaceTask200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1589,26 +1201,18 @@ export interface JobScheduleTerminateJobSchedule202Headers { dataserviceid: string; } -/** The request has been accepted for processing, but processing has not yet completed. */ -export interface JobScheduleTerminateJobSchedule202Response - extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & JobScheduleTerminateJobSchedule202Headers; -} - -export interface JobScheduleTerminateJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface ReplaceTask200Response extends HttpResponse { + status: "200"; + headers: RawHttpHeaders & ReplaceTask200Headers; } -export interface JobScheduleTerminateJobScheduleDefaultResponse - extends HttpResponse { +export interface ReplaceTaskDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleTerminateJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleAddJobSchedule201Headers { +export interface ListSubTasks200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1617,28 +1221,21 @@ export interface JobScheduleAddJobSchedule201Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. 
*/ - dataserviceid: string; -} - -/** The request has succeeded and a new resource has been created as a result. */ -export interface JobScheduleAddJobSchedule201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & JobScheduleAddJobSchedule201Headers; } -export interface JobScheduleAddJobScheduleDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface ListSubTasks200Response extends HttpResponse { + status: "200"; + body: BatchTaskListSubtasksResultOutput; + headers: RawHttpHeaders & ListSubTasks200Headers; } -export interface JobScheduleAddJobScheduleDefaultResponse extends HttpResponse { +export interface ListSubTasksDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleAddJobScheduleDefaultHeaders; + body: BatchErrorOutput; } -export interface JobScheduleListJobSchedules200Headers { +export interface TerminateTask204Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1647,28 +1244,22 @@ export interface JobScheduleListJobSchedules200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } -/** The request has succeeded. */ -export interface JobScheduleListJobSchedules200Response extends HttpResponse { - status: "200"; - body: BatchJobScheduleListResultOutput; - headers: RawHttpHeaders & JobScheduleListJobSchedules200Headers; -} - -export interface JobScheduleListJobSchedulesDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** There is no content to send for this request, but the headers may be useful. */ +export interface TerminateTask204Response extends HttpResponse { + status: "204"; + headers: RawHttpHeaders & TerminateTask204Headers; } -export interface JobScheduleListJobSchedulesDefaultResponse - extends HttpResponse { +export interface TerminateTaskDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & JobScheduleListJobSchedulesDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskAddTask201Headers { +export interface ReactivateTask204Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. 
*/ @@ -1681,102 +1272,102 @@ export interface TaskAddTask201Headers { dataserviceid: string; } -/** The request has succeeded and a new resource has been created as a result. */ -export interface TaskAddTask201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & TaskAddTask201Headers; -} - -export interface TaskAddTaskDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** There is no content to send for this request, but the headers may be useful. */ +export interface ReactivateTask204Response extends HttpResponse { + status: "204"; + headers: RawHttpHeaders & ReactivateTask204Headers; } -export interface TaskAddTaskDefaultResponse extends HttpResponse { +export interface ReactivateTaskDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskAddTaskDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskListTasks200Headers { +export interface DeleteTaskFile200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; } /** The request has succeeded. */ -export interface TaskListTasks200Response extends HttpResponse { +export interface DeleteTaskFile200Response extends HttpResponse { status: "200"; - body: BatchTaskListResultOutput; - headers: RawHttpHeaders & TaskListTasks200Headers; + headers: RawHttpHeaders & DeleteTaskFile200Headers; } -export interface TaskListTasksDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface TaskListTasksDefaultResponse extends HttpResponse { +export interface DeleteTaskFileDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskListTasksDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskAddTaskCollection200Headers { +export interface GetTaskFile200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; + /** The ETag HTTP response header. 
This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The file creation time. */ + "ocp-creation-time"?: string; + /** Whether the object represents a directory. */ + "ocp-batch-file-isdirectory": boolean; + /** The URL of the file. */ + "ocp-batch-file-url": string; + /** The file mode attribute in octal format. */ + "ocp-batch-file-mode": string; + /** The length of the file. */ + "content-length": number; } /** The request has succeeded. */ -export interface TaskAddTaskCollection200Response extends HttpResponse { +export interface GetTaskFile200Response extends HttpResponse { status: "200"; - body: TaskAddCollectionResultOutput; - headers: RawHttpHeaders & TaskAddTaskCollection200Headers; -} - -export interface TaskAddTaskCollectionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + /** Value may contain any sequence of octets */ + body: Uint8Array; + headers: RawHttpHeaders & GetTaskFile200Headers; } -export interface TaskAddTaskCollectionDefaultResponse extends HttpResponse { +export interface GetTaskFileDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskAddTaskCollectionDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskDeleteTaskCollection200Headers { +export interface GetTaskFileProperties200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The file creation time. */ + "ocp-creation-time"?: string; + /** Whether the object represents a directory. */ + "ocp-batch-file-isdirectory": boolean; + /** The URL of the file. */ + "ocp-batch-file-url": string; + /** The file mode attribute in octal format. */ + "ocp-batch-file-mode": string; + /** The length of the file. */ + "content-length": number; } /** The request has succeeded. */ -export interface TaskDeleteTaskCollection200Response extends HttpResponse { +export interface GetTaskFileProperties200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & TaskDeleteTaskCollection200Headers; -} - -export interface TaskDeleteTaskCollectionDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & GetTaskFileProperties200Headers; } -export interface TaskDeleteTaskCollectionDefaultResponse extends HttpResponse { +export interface GetTaskFilePropertiesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskDeleteTaskCollectionDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskGetTaskCollection200Headers { +export interface ListTaskFiles200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1785,29 +1376,21 @@ export interface TaskGetTaskCollection200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; } /** The request has succeeded. */ -export interface TaskGetTaskCollection200Response extends HttpResponse { +export interface ListTaskFiles200Response extends HttpResponse { status: "200"; - body: BatchTaskOutput; - headers: RawHttpHeaders & TaskGetTaskCollection200Headers; -} - -export interface TaskGetTaskCollectionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: NodeFileListResultOutput; + headers: RawHttpHeaders & ListTaskFiles200Headers; } -export interface TaskGetTaskCollectionDefaultResponse extends HttpResponse { +export interface ListTaskFilesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskGetTaskCollectionDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskUpdateTaskCollection200Headers { +export interface CreateNodeUser201Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1820,53 +1403,36 @@ export interface TaskUpdateTaskCollection200Headers { dataserviceid: string; } -/** The request has succeeded. */ -export interface TaskUpdateTaskCollection200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & TaskUpdateTaskCollection200Headers; -} - -export interface TaskUpdateTaskCollectionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded and a new resource has been created as a result. 
*/ +export interface CreateNodeUser201Response extends HttpResponse { + status: "201"; + headers: RawHttpHeaders & CreateNodeUser201Headers; } -export interface TaskUpdateTaskCollectionDefaultResponse extends HttpResponse { +export interface CreateNodeUserDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskUpdateTaskCollectionDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskListSubtasks200Headers { +export interface DeleteNodeUser200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; } /** The request has succeeded. */ -export interface TaskListSubtasks200Response extends HttpResponse { +export interface DeleteNodeUser200Response extends HttpResponse { status: "200"; - body: BatchTaskListSubtasksResultOutput; - headers: RawHttpHeaders & TaskListSubtasks200Headers; -} - -export interface TaskListSubtasksDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & DeleteNodeUser200Headers; } -export interface TaskListSubtasksDefaultResponse extends HttpResponse { +export interface DeleteNodeUserDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskListSubtasksDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskTerminateTaskCollection204Headers { +export interface ReplaceNodeUser200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1879,25 +1445,18 @@ export interface TaskTerminateTaskCollection204Headers { dataserviceid: string; } -/** There is no content to send for this request, but the headers may be useful. */ -export interface TaskTerminateTaskCollection204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & TaskTerminateTaskCollection204Headers; -} - -export interface TaskTerminateTaskCollectionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. 
*/ +export interface ReplaceNodeUser200Response extends HttpResponse { + status: "200"; + headers: RawHttpHeaders & ReplaceNodeUser200Headers; } -export interface TaskTerminateTaskCollectionDefaultResponse - extends HttpResponse { +export interface ReplaceNodeUserDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskTerminateTaskCollectionDefaultHeaders; + body: BatchErrorOutput; } -export interface TaskReactivateTaskCollection204Headers { +export interface GetNode200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1906,29 +1465,21 @@ export interface TaskReactivateTaskCollection204Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; } -/** There is no content to send for this request, but the headers may be useful. */ -export interface TaskReactivateTaskCollection204Response extends HttpResponse { - status: "204"; - headers: RawHttpHeaders & TaskReactivateTaskCollection204Headers; -} - -export interface TaskReactivateTaskCollectionDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface GetNode200Response extends HttpResponse { + status: "200"; + body: BatchNodeOutput; + headers: RawHttpHeaders & GetNode200Headers; } -export interface TaskReactivateTaskCollectionDefaultResponse - extends HttpResponse { +export interface GetNodeDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & TaskReactivateTaskCollectionDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesAddUser201Headers { +export interface RebootNode202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1941,48 +1492,42 @@ export interface ComputeNodesAddUser201Headers { dataserviceid: string; } -/** The request has succeeded and a new resource has been created as a result. */ -export interface ComputeNodesAddUser201Response extends HttpResponse { - status: "201"; - headers: RawHttpHeaders & ComputeNodesAddUser201Headers; -} - -export interface ComputeNodesAddUserDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface RebootNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & RebootNode202Headers; } -export interface ComputeNodesAddUserDefaultResponse extends HttpResponse { +export interface RebootNodeDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesAddUserDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesDeleteUser200Headers { +export interface ReimageNode202Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } -/** The request has succeeded. */ -export interface ComputeNodesDeleteUser200Response extends HttpResponse { - status: "200"; - headers: RawHttpHeaders & ComputeNodesDeleteUser200Headers; -} - -export interface ComputeNodesDeleteUserDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface ReimageNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & ReimageNode202Headers; } -export interface ComputeNodesDeleteUserDefaultResponse extends HttpResponse { +export interface ReimageNodeDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesDeleteUserDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesUpdateUser200Headers { +export interface DisableNodeScheduling200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -1996,23 +1541,17 @@ export interface ComputeNodesUpdateUser200Headers { } /** The request has succeeded. 
*/ -export interface ComputeNodesUpdateUser200Response extends HttpResponse { +export interface DisableNodeScheduling200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & ComputeNodesUpdateUser200Headers; + headers: RawHttpHeaders & DisableNodeScheduling200Headers; } -export interface ComputeNodesUpdateUserDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodesUpdateUserDefaultResponse extends HttpResponse { +export interface DisableNodeSchedulingDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesUpdateUserDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesGetComputeNode200Headers { +export interface EnableNodeScheduling200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2021,28 +1560,22 @@ export interface ComputeNodesGetComputeNode200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; } /** The request has succeeded. */ -export interface ComputeNodesGetComputeNode200Response extends HttpResponse { +export interface EnableNodeScheduling200Response extends HttpResponse { status: "200"; - body: ComputeNodeOutput; - headers: RawHttpHeaders & ComputeNodesGetComputeNode200Headers; + headers: RawHttpHeaders & EnableNodeScheduling200Headers; } -export interface ComputeNodesGetComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodesGetComputeNodeDefaultResponse - extends HttpResponse { +export interface EnableNodeSchedulingDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesGetComputeNodeDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesRebootComputeNode202Headers { +export interface GetNodeRemoteLoginSettings200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2051,29 +1584,22 @@ export interface ComputeNodesRebootComputeNode202Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. 
*/ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. */ -export interface ComputeNodesRebootComputeNode202Response extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & ComputeNodesRebootComputeNode202Headers; } -export interface ComputeNodesRebootComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface GetNodeRemoteLoginSettings200Response extends HttpResponse { + status: "200"; + body: BatchNodeRemoteLoginSettingsResultOutput; + headers: RawHttpHeaders & GetNodeRemoteLoginSettings200Headers; } -export interface ComputeNodesRebootComputeNodeDefaultResponse +export interface GetNodeRemoteLoginSettingsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesRebootComputeNodeDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesReimageComputeNode202Headers { +export interface GetNodeRemoteDesktopFile200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2082,61 +1608,40 @@ export interface ComputeNodesReimageComputeNode202Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; -} - -/** The request has been accepted for processing, but processing has not yet completed. */ -export interface ComputeNodesReimageComputeNode202Response - extends HttpResponse { - status: "202"; - headers: RawHttpHeaders & ComputeNodesReimageComputeNode202Headers; } -export interface ComputeNodesReimageComputeNodeDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface GetNodeRemoteDesktopFile200Response extends HttpResponse { + status: "200"; + body: string; + headers: RawHttpHeaders & GetNodeRemoteDesktopFile200Headers; } -export interface ComputeNodesReimageComputeNodeDefaultResponse - extends HttpResponse { +export interface GetNodeRemoteDesktopFileDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesReimageComputeNodeDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesDisableScheduling200Headers { +export interface UploadNodeLogs200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ "request-id"?: string; - /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ - etag?: string; - /** The time at which the resource was last modified. */ - "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; } /** The request has succeeded. */ -export interface ComputeNodesDisableScheduling200Response extends HttpResponse { +export interface UploadNodeLogs200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & ComputeNodesDisableScheduling200Headers; -} - -export interface ComputeNodesDisableSchedulingDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: UploadBatchServiceLogsResultOutput; + headers: RawHttpHeaders & UploadNodeLogs200Headers; } -export interface ComputeNodesDisableSchedulingDefaultResponse - extends HttpResponse { +export interface UploadNodeLogsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesDisableSchedulingDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesEnableScheduling200Headers { +export interface ListNodes200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2145,29 +1650,21 @@ export interface ComputeNodesEnableScheduling200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; - /** The OData ID of the resource to which the request applied. */ - dataserviceid: string; } /** The request has succeeded. */ -export interface ComputeNodesEnableScheduling200Response extends HttpResponse { +export interface ListNodes200Response extends HttpResponse { status: "200"; - headers: RawHttpHeaders & ComputeNodesEnableScheduling200Headers; + body: BatchNodeListResultOutput; + headers: RawHttpHeaders & ListNodes200Headers; } -export interface ComputeNodesEnableSchedulingDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodesEnableSchedulingDefaultResponse - extends HttpResponse { +export interface ListNodesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesEnableSchedulingDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesGetRemoteLoginSettings200Headers { +export interface GetNodeExtension200Headers { /** The client-request-id provided by the client during the request. 
This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2179,26 +1676,18 @@ export interface ComputeNodesGetRemoteLoginSettings200Headers { } /** The request has succeeded. */ -export interface ComputeNodesGetRemoteLoginSettings200Response - extends HttpResponse { +export interface GetNodeExtension200Response extends HttpResponse { status: "200"; - body: ComputeNodeGetRemoteLoginSettingsResultOutput; - headers: RawHttpHeaders & ComputeNodesGetRemoteLoginSettings200Headers; -} - -export interface ComputeNodesGetRemoteLoginSettingsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: NodeVMExtensionOutput; + headers: RawHttpHeaders & GetNodeExtension200Headers; } -export interface ComputeNodesGetRemoteLoginSettingsDefaultResponse - extends HttpResponse { +export interface GetNodeExtensionDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesGetRemoteLoginSettingsDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesGetRemoteDesktop200Headers { +export interface ListNodeExtensions200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2210,26 +1699,18 @@ export interface ComputeNodesGetRemoteDesktop200Headers { } /** The request has succeeded. */ -export interface ComputeNodesGetRemoteDesktop200Response extends HttpResponse { +export interface ListNodeExtensions200Response extends HttpResponse { status: "200"; - /** Value may contain any sequence of octets */ - body: Uint8Array; - headers: RawHttpHeaders & ComputeNodesGetRemoteDesktop200Headers; -} - -export interface ComputeNodesGetRemoteDesktopDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: NodeVMExtensionListOutput; + headers: RawHttpHeaders & ListNodeExtensions200Headers; } -export interface ComputeNodesGetRemoteDesktopDefaultResponse - extends HttpResponse { +export interface ListNodeExtensionsDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesGetRemoteDesktopDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesUploadBatchServiceLogs200Headers { +export interface DeleteNodeFile200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. 
*/ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2237,26 +1718,17 @@ export interface ComputeNodesUploadBatchServiceLogs200Headers { } /** The request has succeeded. */ -export interface ComputeNodesUploadBatchServiceLogs200Response - extends HttpResponse { +export interface DeleteNodeFile200Response extends HttpResponse { status: "200"; - body: UploadBatchServiceLogsResultOutput; - headers: RawHttpHeaders & ComputeNodesUploadBatchServiceLogs200Headers; + headers: RawHttpHeaders & DeleteNodeFile200Headers; } -export interface ComputeNodesUploadBatchServiceLogsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; -} - -export interface ComputeNodesUploadBatchServiceLogsDefaultResponse - extends HttpResponse { +export interface DeleteNodeFileDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesUploadBatchServiceLogsDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodesList200Headers { +export interface GetNodeFile200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2265,27 +1737,31 @@ export interface ComputeNodesList200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The file creation time. */ + "ocp-creation-time"?: string; + /** Whether the object represents a directory. */ + "ocp-batch-file-isdirectory": boolean; + /** The URL of the file. */ + "ocp-batch-file-url": string; + /** The file mode attribute in octal format. */ + "ocp-batch-file-mode": string; + /** The length of the file. */ + "content-length": number; } /** The request has succeeded. */ -export interface ComputeNodesList200Response extends HttpResponse { +export interface GetNodeFile200Response extends HttpResponse { status: "200"; - body: ComputeNodeListResultOutput; - headers: RawHttpHeaders & ComputeNodesList200Headers; -} - -export interface ComputeNodesListDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; + body: string; + headers: RawHttpHeaders & GetNodeFile200Headers; } -export interface ComputeNodesListDefaultResponse extends HttpResponse { +export interface GetNodeFileDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & ComputeNodesListDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodeExtensionsGetComputeNodeExtensions200Headers { +export interface GetNodeFileProperties200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2294,31 +1770,30 @@ export interface ComputeNodeExtensionsGetComputeNodeExtensions200Headers { etag?: string; /** The time at which the resource was last modified. */ "last-modified"?: string; + /** The file creation time. */ + "ocp-creation-time"?: string; + /** Whether the object represents a directory. */ + "ocp-batch-file-isdirectory": boolean; + /** The URL of the file. */ + "ocp-batch-file-url": string; + /** The file mode attribute in octal format. */ + "ocp-batch-file-mode": string; + /** The length of the file. */ + "content-length": number; } /** The request has succeeded. */ -export interface ComputeNodeExtensionsGetComputeNodeExtensions200Response - extends HttpResponse { +export interface GetNodeFileProperties200Response extends HttpResponse { status: "200"; - body: NodeVMExtensionOutput; - headers: RawHttpHeaders & - ComputeNodeExtensionsGetComputeNodeExtensions200Headers; -} - -export interface ComputeNodeExtensionsGetComputeNodeExtensionsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + headers: RawHttpHeaders & GetNodeFileProperties200Headers; } -export interface ComputeNodeExtensionsGetComputeNodeExtensionsDefaultResponse - extends HttpResponse { +export interface GetNodeFilePropertiesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & - ComputeNodeExtensionsGetComputeNodeExtensionsDefaultHeaders; + body: BatchErrorOutput; } -export interface ComputeNodeExtensionsListComputeNodeExtensions200Headers { +export interface ListNodeFiles200Headers { /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ "client-request-id"?: string; /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ @@ -2330,23 +1805,13 @@ export interface ComputeNodeExtensionsListComputeNodeExtensions200Headers { } /** The request has succeeded. 
*/ -export interface ComputeNodeExtensionsListComputeNodeExtensions200Response - extends HttpResponse { +export interface ListNodeFiles200Response extends HttpResponse { status: "200"; - body: NodeVMExtensionListOutput; - headers: RawHttpHeaders & - ComputeNodeExtensionsListComputeNodeExtensions200Headers; -} - -export interface ComputeNodeExtensionsListComputeNodeExtensionsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; + body: NodeFileListResultOutput; + headers: RawHttpHeaders & ListNodeFiles200Headers; } -export interface ComputeNodeExtensionsListComputeNodeExtensionsDefaultResponse - extends HttpResponse { +export interface ListNodeFilesDefaultResponse extends HttpResponse { status: string; - body: ErrorResponse; - headers: RawHttpHeaders & - ComputeNodeExtensionsListComputeNodeExtensionsDefaultHeaders; + body: BatchErrorOutput; } diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/test/public/sampleTest.spec.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/test/public/sampleTest.spec.ts index bce68e4286..97b8e8a02b 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/test/public/sampleTest.spec.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/test/public/sampleTest.spec.ts @@ -3,7 +3,7 @@ import { Recorder } from "@azure-tools/test-recorder"; import { assert } from "chai"; -import { createRecorder } from "./utils/recordedClient"; +import { createRecorder } from "./utils/recordedClient.js"; import { Context } from "mocha"; describe("My test", () => { diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/tsconfig.json b/packages/typespec-test/test/batch_modular/generated/typespec-ts/tsconfig.json index 9ca43fa318..c1c30102d9 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/tsconfig.json +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/tsconfig.json @@ -1,8 +1,8 @@ { "compilerOptions": { "target": "ES2017", - "module": "es6", - "lib": [], + "module": "NodeNext", + "lib": ["esnext", "dom"], "declaration": true, "declarationMap": true, "inlineSources": true, @@ -15,11 +15,13 @@ "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "forceConsistentCasingInFileNames": true, - "moduleResolution": "node", + "moduleResolution": "NodeNext", "allowSyntheticDefaultImports": true, "esModuleInterop": true, "outDir": "./dist-esm", - "declarationDir": "./types" + "declarationDir": "./types", + "rootDir": "." 
}, + "ts-node": { "esm": true }, "include": ["./src/**/*.ts", "./test/**/*.ts"] } diff --git a/packages/typespec-test/test/batch_modular/skip b/packages/typespec-test/test/batch_modular/skip deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/packages/typespec-test/test/batch_modular/spec/main.tsp b/packages/typespec-test/test/batch_modular/spec/main.tsp index 9fc5c8cbda..ad8b4ca102 100644 --- a/packages/typespec-test/test/batch_modular/spec/main.tsp +++ b/packages/typespec-test/test/batch_modular/spec/main.tsp @@ -1,5 +1,4 @@ import "@typespec/rest"; -import "@typespec/http"; import "@typespec/versioning"; import "./routes.tsp"; @@ -7,19 +6,29 @@ using TypeSpec.Http; using TypeSpec.Versioning; @service({ - title: "BatchServiceClient" + title: "Azure Batch", }) +@versioned(Azure.Batch.Versions) +@doc("Azure Batch provides Cloud-scale job scheduling and compute management.") +@server( + "{endpoint}", + "Azure Batch provides Cloud-scale job scheduling and compute management.", + { + @doc("Batch account endpoint (for example: https://batchaccount.eastus2.batch.azure.com).") + endpoint: url, + } +) @useAuth(AADToken) -@versioned(BatchService.Versions) - -@doc("A client for issuing REST requests to the Azure Batch service.") -namespace BatchService; +namespace Azure.Batch; +@doc("The Azure Batch service version.") enum Versions { - @useDependency(Azure.Core.Versions.v1_0_Preview_1) - v2022_10_01: "2022-10-01.16.0", + @doc("API Version 2023-05-01.17.0") + @useDependency(Azure.Core.Versions.v1_0_Preview_2) + `2023-05-01.17.0`, } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("The Azure Active Directory OAuth2 Flow") model AADToken is OAuth2Auth<[ @@ -28,4 +37,4 @@ model AADToken authorizationUrl: "https://login.microsoftonline.com/common/oauth2/authorize"; scopes: ["https://batch.core.windows.net//.default"]; } - ]>; \ No newline at end of file + ]>; diff --git a/packages/typespec-test/test/batch_modular/spec/models.tsp b/packages/typespec-test/test/batch_modular/spec/models.tsp index b168c6bd4a..4c8781a736 100644 --- a/packages/typespec-test/test/batch_modular/spec/models.tsp +++ b/packages/typespec-test/test/batch_modular/spec/models.tsp @@ -1,18 +1,15 @@ import "@typespec/rest"; -import "@typespec/http"; import "@azure-tools/typespec-azure-core"; using TypeSpec.Reflection; using TypeSpec.Http; using TypeSpec.Rest; -using Autorest; using TypeSpec.Versioning; using Azure.Core; -using OpenAPI; - -namespace BatchService; +namespace Azure.Batch; +@doc("OSType enums") enum OSType { @doc("The Linux operating system.") Linux: "linux", @@ -21,6 +18,7 @@ enum OSType { Windows: "windows", } +@doc("VerificationType enums") enum VerificationType { @doc("The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all Batch features have been confirmed to work as expected.") Verified: "verified", @@ -29,6 +27,7 @@ enum VerificationType { Unverified: "unverified", } +@doc("CertificateState enums") enum CertificateState { @doc("The Certificate is available for use in Pools.") Active: "active", @@ -40,6 +39,7 @@ enum CertificateState { DeleteFailed: "deletefailed", } +@doc("CertificateFormat enums") enum CertificateFormat { @doc("The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain.") Pfx: "pfx", @@ -48,6 +48,7 @@ enum CertificateFormat { Cer: "cer", } +@doc("JobScheduleState enums") enum JobScheduleState { @doc("The Job Schedule is active and will create Jobs as per its schedule.") 
Active: "active", @@ -65,14 +66,16 @@ enum JobScheduleState { Deleting: "deleting", } +@doc("The action the Batch service should take when all Tasks in the Job are in the completed state.") enum OnAllTasksComplete { @doc("Do nothing. The Job remains active unless terminated or disabled by some other means.") NoAction: "noaction", - @doc("") + @doc("Terminate the Job. The Job's terminateReason is set to 'AllTasksComplete'.") TerminateJob: "terminatejob", } +@doc("OnTaskFailure enums") enum OnTaskFailure { @doc("Do nothing. The Job remains active unless terminated or disabled by some other means.") NoAction: "noaction", @@ -81,6 +84,7 @@ enum OnTaskFailure { PerformExitOptionsJobAction: "performexitoptionsjobaction", } +@doc("ContainerWorkingDirectory enums") enum ContainerWorkingDirectory { @doc("Use the standard Batch service Task working directory, which will contain the Task Resource Files populated by Batch.") TaskWorkingDirectory: "taskWorkingDirectory", @@ -89,6 +93,7 @@ enum ContainerWorkingDirectory { ContainerImageDefault: "containerImageDefault", } +@doc("OutputFileUploadCondition enums") enum OutputFileUploadCondition { @doc("Upload the file(s) only after the Task process exits with an exit code of 0.") TaskSuccess: "tasksuccess", @@ -100,6 +105,7 @@ enum OutputFileUploadCondition { TaskCompletion: "taskcompletion", } +@doc("AutoUserScope enums") enum AutoUserScope { @doc("Specifies that the service should create a new user for the Task.") Task: "task", @@ -108,6 +114,7 @@ enum AutoUserScope { Pool: "pool", } +@doc("ElevationLevel enums") enum ElevationLevel { @doc("The user is a standard user without elevated access.") NonAdmin: "nonadmin", @@ -116,6 +123,7 @@ enum ElevationLevel { Admin: "admin", } +@doc("PoolLifetimeOption enums") enum PoolLifetimeOption { @doc("The Pool exists for the lifetime of the Job Schedule. The Batch Service creates the Pool when it creates the first Job on the schedule. 
You may apply this option only to Job Schedules, not to Jobs.") JobSchedule: "jobschedule", @@ -124,6 +132,7 @@ enum PoolLifetimeOption { Job: "job", } +@doc("CachingType enums") enum CachingType { @doc("The caching mode for the disk is not enabled.") None: "none", @@ -135,6 +144,7 @@ enum CachingType { ReadWrite: "readwrite", } +@doc("StorageAccountType enums") enum StorageAccountType { @doc("The data disk should use standard locally redundant storage.") StandardLRS: "standard_lrs", @@ -143,6 +153,7 @@ enum StorageAccountType { PremiumLRS: "premium_lrs", } +@doc("DiskEncryptionTarget enums") enum DiskEncryptionTarget { @doc("The OS Disk on the compute node is encrypted.") OsDisk: "osdisk", @@ -151,6 +162,7 @@ enum DiskEncryptionTarget { TemporaryDisk: "temporarydisk", } +@doc("NodePlacementPolicyType enums") enum NodePlacementPolicyType { @doc("All nodes in the pool will be allocated in the same region.") Regional: "regional", @@ -159,7 +171,8 @@ enum NodePlacementPolicyType { Zonal: "zonal", } -enum ComputeNodeFillType { +@doc("BatchNodeFillType enums") +enum BatchNodeFillType { @doc("Tasks should be assigned evenly across all Compute Nodes in the Pool.") Spread: "spread", @@ -167,6 +180,7 @@ Pack: "pack", } +@doc("DynamicVNetAssignmentScope enums") enum DynamicVNetAssignmentScope { @doc("No dynamic VNet assignment is enabled.") None: "none", @@ -175,6 +189,7 @@ Job: "job", } +@doc("InboundEndpointProtocol enums") enum InboundEndpointProtocol { @doc("Use TCP for the endpoint.") Tcp: "tcp", @@ -183,6 +198,7 @@ Udp: "udp", } +@doc("NetworkSecurityGroupRuleAccess enums") enum NetworkSecurityGroupRuleAccess { @doc("Allow access.") Allow: "allow", @@ -191,6 +207,8 @@ Deny: "deny", } +@doc("IPAddressProvisioningType enums") +@projectedName("client", "IpAddressProvisioningType") enum IPAddressProvisioningType { @doc("A public IP will be created and managed by Batch. There may be multiple public IPs depending on the size of the Pool.") BatchManaged: "batchmanaged", @@ -199,23 +217,32 @@ UserManaged: "usermanaged", @doc("No public IP Address will be created.") + @projectedName("client", "NoPublicIpAddresses") NoPublicIPAddresses: "nopublicipaddresses", } -enum DiffDiskPlacement{ +@doc("DiffDiskPlacement enums") +enum DiffDiskPlacement { @doc("The Ephemeral OS Disk is stored on the VM cache.") CacheDisk: "cachedisk", } +@doc("ContainerType enums") enum ContainerType { @doc("A Docker compatible container technology will be used to launch the containers.") DockerCompatible: "dockerCompatible", + + @doc("A CRI based technology will be used to launch the containers.") + CriCompatible: "criCompatible", } + +@doc("AccessScope enums") enum AccessScope { @doc("Grants access to perform all operations on the Job containing the Task.") Job: "job", } +@doc("CertificateStoreLocation enums") enum CertificateStoreLocation { @doc("Certificates should be installed to the CurrentUser Certificate store.") CurrentUser: "currentuser", @@ -224,6 +251,7 @@ LocalMachine: "localmachine", } +@doc("CertificateVisibility enums") enum CertificateVisibility { @doc("The Certificate should be visible to the user account under which the StartTask is run.
Note that if AutoUser Scope is Pool for both the StartTask and a Task, this certificate will be visible to the Task as well.") StartTask: "starttask", @@ -235,6 +263,7 @@ enum CertificateVisibility { RemoteUser: "remoteuser", } +@doc("LoginMode enums") enum LoginMode { @doc("The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes.") Batch: "batch", @@ -243,6 +272,7 @@ enum LoginMode { Interactive: "interactive", } +@doc("NodeCommunicationMode enums") enum NodeCommunicationMode { @doc("The node communication mode is automatically set by the Batch service.") Default: "default", @@ -254,6 +284,7 @@ enum NodeCommunicationMode { Simplified: "simplified", } +@doc("JobState enums") enum JobState { @doc("The Job is available to have Tasks scheduled.") Active: "active", @@ -277,6 +308,7 @@ enum JobState { Deleting: "deleting", } +@doc("ErrorCategory enums") enum ErrorCategory { @doc("The error is due to a user issue, such as misconfiguration.") UserError: "usererror", @@ -285,6 +317,7 @@ enum ErrorCategory { ServerError: "servererror", } +@doc("DisableJobOption enums") enum DisableJobOption { @doc("Terminate running Tasks and requeue them. The Tasks will run again when the Job is enabled.") Requeue: "requeue", @@ -296,6 +329,7 @@ enum DisableJobOption { Wait: "wait", } +@doc("JobPreparationTaskState enums") enum JobPreparationTaskState { @doc("The Task is currently running (including retrying).") Running: "running", @@ -304,6 +338,7 @@ enum JobPreparationTaskState { Completed: "completed", } +@doc("TaskExecutionResult enums") enum TaskExecutionResult { @doc("The Task ran successfully.") Success: "success", @@ -312,6 +347,7 @@ enum TaskExecutionResult { Failure: "failure", } +@doc("JobReleaseTaskState enums") enum JobReleaseTaskState { @doc("The Task is currently running (including retrying).") Running: "running", @@ -320,6 +356,7 @@ enum JobReleaseTaskState { Completed: "completed", } +@doc("PoolState enums") enum PoolState { @doc("The Pool is available to run Tasks subject to the availability of Compute Nodes.") Active: "active", @@ -328,6 +365,7 @@ enum PoolState { Deleting: "deleting", } +@doc("AllocationState enums") enum AllocationState { @doc("The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in progress. A Pool enters this state when it is created and when no operations are being performed on the Pool to change the number of Compute Nodes.") Steady: "steady", @@ -339,15 +377,17 @@ enum AllocationState { Stopping: "stopping", } +@doc("PoolIdentityType enums") enum PoolIdentityType { @doc("Batch pool has user assigned identities with it.") - "UserAssigned", + UserAssigned, @doc("Batch pool has no identity associated with it. Setting `None` in update pool will remove existing identities.") - "None", + None, } -enum ComputeNodeDeallocationOption { +@doc("BatchNodeDeallocationOption enums") +enum BatchNodeDeallocationOption { @doc("Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. 
Remove Compute Nodes as soon as Tasks have been terminated.") Requeue: "requeue", @@ -361,6 +401,7 @@ enum ComputeNodeDeallocationOption { RetainedData: "retaineddata", } +@doc("JobAction enums") enum JobAction { @doc("Take no action.") None: "none", @@ -372,6 +413,7 @@ enum JobAction { Terminate: "terminate", } +@doc("DependencyAction enums") enum DependencyAction { @doc("Satisfy tasks waiting on this task; once all dependencies are satisfied, the task will be scheduled to run.") Satisfy: "satisfy", @@ -380,6 +422,7 @@ enum DependencyAction { Block: "block", } +@doc("TaskState enums") enum TaskState { @doc("The Task is queued and able to run, but is not currently assigned to a Compute Node. A Task enters this state when it is created, when it is enabled after being disabled, or when it is awaiting a retry after a failed run.") Active: "active", @@ -394,9 +437,10 @@ enum TaskState { Completed: "completed", } +@doc("TaskAddStatus enums") enum TaskAddStatus { @doc("The Task was added successfully.") - Success: "success", + Success: "Success", @doc("The Task failed to add due to a client error and should not be retried without modifying the request as appropriate.") ClientError: "clienterror", @@ -405,6 +449,7 @@ enum TaskAddStatus { ServerError: "servererror", } +@doc("SubtaskState enums") enum SubtaskState { @doc("The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node.") Preparing: "preparing", @@ -416,7 +461,8 @@ enum SubtaskState { Completed: "completed", } -enum ComputeNodeState { +@doc("BatchNodeState enums") +enum BatchNodeState { @doc("The Compute Node is not currently running a Task.") Idle: "idle", @@ -457,6 +503,7 @@ enum ComputeNodeState { Preempted: "preempted", } +@doc("SchedulingState enums") enum SchedulingState { @doc("Tasks can be scheduled on the Compute Node.") Enabled: "enabled", @@ -465,6 +512,7 @@ enum SchedulingState { Disabled: "disabled", } +@doc("StartTaskState enums") enum StartTaskState { @doc("The StartTask is currently running.") Running: "running", @@ -473,7 +521,8 @@ enum StartTaskState { Completed: "completed", } -enum ComputeNodeRebootOption { +@doc("BatchNodeRebootOption enums") +enum BatchNodeRebootOption { @doc("Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Restart the Compute Node as soon as Tasks have been terminated.") Requeue: "requeue", @@ -487,7 +536,8 @@ enum ComputeNodeRebootOption { RetainedData: "retaineddata", } -enum ComputeNodeReimageOption { +@doc("BatchNodeReimageOption enums") +enum BatchNodeReimageOption { @doc("Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Reimage the Compute Node as soon as Tasks have been terminated.") Requeue: "requeue", @@ -501,7 +551,8 @@ enum ComputeNodeReimageOption { RetainedData: "retaineddata", } -enum DisableComputeNodeSchedulingOption { +@doc("DisableBatchNodeSchedulingOption enums") +enum DisableBatchNodeSchedulingOption { @doc("Terminate running Task processes and requeue the Tasks. The Tasks may run again on other Compute Nodes, or when Task scheduling is re-enabled on this Compute Node. 
Enter offline state as soon as Tasks have been terminated.") Requeue: "requeue", @@ -512,38 +563,34 @@ enum DisableComputeNodeSchedulingOption { TaskCompletion: "taskcompletion", } +@doc("Level code.") enum StatusLevelTypes { - @doc("") - "Error", + @doc("Error") + Error, - @doc("") - "Info", + @doc("Info") + Info, - @doc("") - "Warning", + @doc("Warning") + Warning, } - @doc("The result of listing the applications available in an Account.") @pagedResult model ApplicationListResult { @doc("The list of applications available in the Account.") @items - value?: Application[]; + value?: BatchApplication[]; #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc("Contains information about an application in an Azure Batch Account.") -@resource("applications") -model Application { +model BatchApplication { @doc("A string that uniquely identifies the application within the Account.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key ("applicationId") id: string; @doc("The display name for the application.") @@ -556,13 +603,10 @@ model Application { @doc("An error response received from the Azure Batch service.") @error model BatchError { - @doc(""" -An identifier for the error. Codes are invariant and are intended to be -consumed programmatically. -""") - code?: string; + @doc("An identifier for the error. Codes are invariant and are intended to be consumed programmatically.") + code: string; - @doc("An error message received in an Azure Batch error response.") + @doc("A message describing the error, intended to be suitable for display in a user interface.") message?: ErrorMessage; @doc("A collection of key-value pairs containing additional details about the error.") @@ -571,7 +615,7 @@ consumed programmatically. @doc("An error message received in an Azure Batch error response.") model ErrorMessage { - @doc("The language code of the error message") + @doc("The language code of the error message.") lang?: string; @doc("The text of the message.") @@ -597,16 +641,12 @@ model PoolListUsageMetricsResult { #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc("Usage metrics for a Pool across an aggregation interval.") -@resource("poolusagemetrics") model PoolUsageMetrics { @doc("The ID of the Pool whose metrics are aggregated in this entry.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key poolId: string; @doc("The start time of the aggregation interval covered by this entry.") @@ -615,11 +655,7 @@ model PoolUsageMetrics { @doc("The end time of the aggregation interval covered by this entry.") endTime: utcDateTime; - @doc(""" -For information about available sizes of virtual machines in Pools, see Choose -a VM size for Compute Nodes in an Azure Batch Pool -(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). -""") + @doc("The size of virtual machines in the Pool. All VMs in a Pool are the same size. 
For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).") vmSize: string; @doc("The total core hours used in the Pool during this aggregation interval.") @@ -636,49 +672,32 @@ model AccountListSupportedImagesResult { #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc(""" A reference to the Azure Virtual Machines Marketplace Image and additional information about the Image. """) -@resource("supportedimages") model ImageInformation { #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The ID of the Compute Node agent SKU which the Image supports.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key + @projectedName("client", "nodeAgentSkuId") nodeAgentSKUId: string; - @doc(""" -A reference to an Azure Virtual Machines Marketplace Image or a Shared Image -Gallery Image. To get the list of all Azure Marketplace Image references -verified by Azure Batch, see the 'List Supported Images' operation. -""") + @doc("The reference to the Azure Virtual Machine's Marketplace Image.") imageReference: ImageReference; @doc("The type of operating system (e.g. Windows or Linux) of the Image.") osType: OSType; - @doc(""" -Not every capability of the Image is listed. Capabilities in this list are -considered of special interest and are generally related to integration with -other features in the Azure Batch service. -""") + @doc("The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service.") capabilities?: string[]; - @doc(""" -The time when the Azure Batch service will stop accepting create Pool requests -for the Image. -""") + @doc("The time when the Azure Batch service will stop accepting create Pool requests for the Image.") batchSupportEndOfLife?: utcDateTime; - @doc(""" -Whether the Azure Batch service actively verifies that the Image is compatible -with the associated Compute Node agent SKU. -""") + @doc("Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU.") verificationType: VerificationType; } @@ -688,37 +707,22 @@ Gallery Image. To get the list of all Azure Marketplace Image references verified by Azure Batch, see the 'List Supported Images' operation. """) model ImageReference { - @doc("For example, Canonical or MicrosoftWindowsServer.") + @doc("The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer.") publisher?: string; - @doc("For example, UbuntuServer or WindowsServer.") + @doc("The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer.") offer?: string; - @doc("For example, 18.04-LTS or 2019-Datacenter.") + @doc("The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter.") sku?: string; - @doc(""" -A value of 'latest' can be specified to select the latest version of an Image. -If omitted, the default is 'latest'. 
-""") + @doc("The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'.") version?: string; - @doc(""" -This property is mutually exclusive with other ImageReference properties. The -Shared Image Gallery Image must have replicas in the same region and must be in -the same subscription as the Azure Batch account. If the image version is not -specified in the imageId, the latest version will be used. For information -about the firewall settings for the Batch Compute Node agent to communicate -with the Batch service see -https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. -""") + @doc("The ARM resource identifier of the Shared Image Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Shared Image Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.") virtualMachineImageId?: string; - @doc(""" -The specific version of the platform image or marketplace image used to create -the node. This read-only field differs from 'version' only if the value -specified for 'version' when the pool was created was 'latest'. -""") + @doc("The specific version of the platform image or marketplace image used to create the node. 
This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'.") @visibility("read") exactVersion?: string; } @@ -730,24 +734,21 @@ model PoolNodeCountsListResult { @items value?: PoolNodeCounts[]; + #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc("The number of Compute Nodes in each state for a Pool.") -@resource("nodecounts") model PoolNodeCounts { @doc("The ID of the Pool.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key poolId: string; - @doc("The number of Compute Nodes in each Compute Node state.") + @doc("The number of dedicated Compute Nodes in each state.") dedicated?: NodeCounts; - @doc("The number of Compute Nodes in each Compute Node state.") + @doc("The number of Spot/Low-priority Compute Nodes in each state.") lowPriority?: NodeCounts; } @@ -784,7 +785,7 @@ model NodeCounts { leavingPool: int32; @doc("The number of Compute Nodes in the unknown state.") - "unknown": int32; + `unknown`: int32; @doc("The number of Compute Nodes in the unusable state.") unusable: int32; @@ -799,24 +800,18 @@ model NodeCounts { @doc("Contains utilization and resource usage statistics for the lifetime of a Pool.") model PoolStatistics { @doc("The URL for the statistics.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key url: string; @doc("The start time of the time range covered by the statistics.") startTime: utcDateTime; - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") + @doc("The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime.") lastUpdateTime: utcDateTime; - @doc("Statistics related to Pool usage information.") + @doc("Statistics related to Pool usage, such as the amount of core-time used.") usageStats?: UsageStatistics; - @doc("Statistics related to resource consumption by Compute Nodes in a Pool.") + @doc("Statistics related to resource consumption by Compute Nodes in the Pool.") resourceStats?: ResourceStatistics; } @@ -825,34 +820,24 @@ model UsageStatistics { @doc("The start time of the time range covered by the statistics.") startTime: utcDateTime; - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") + @doc("The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime.") lastUpdateTime: utcDateTime; - @doc(""" -The aggregated wall-clock time of the dedicated Compute Node cores being part -of the Pool. 
-""") + @doc("The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool.") dedicatedCoreTime: duration; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("Statistics related to resource consumption by Compute Nodes in a Pool.") model ResourceStatistics { @doc("The start time of the time range covered by the statistics.") startTime: utcDateTime; - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") + @doc("The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime.") lastUpdateTime: utcDateTime; - @doc(""" -The average CPU usage across all Compute Nodes in the Pool (percentage per -node). -""") + @doc("The average CPU usage across all Compute Nodes in the Pool (percentage per node).") + @projectedName("client", "avgCpuPercentage") avgCPUPercentage: float32; @doc("The average memory usage in GiB across all Compute Nodes in the Pool.") @@ -873,66 +858,38 @@ node). @doc("The total number of disk write operations across all Compute Nodes in the Pool.") diskWriteIOps: int32; - @doc(""" -The total amount of data in GiB of disk reads across all Compute Nodes in the -Pool. -""") + @doc("The total amount of data in GiB of disk reads across all Compute Nodes in the Pool.") diskReadGiB: float32; - @doc(""" -The total amount of data in GiB of disk writes across all Compute Nodes in the -Pool. -""") + @doc("The total amount of data in GiB of disk writes across all Compute Nodes in the Pool.") diskWriteGiB: float32; - @doc(""" -The total amount of data in GiB of network reads across all Compute Nodes in -the Pool. -""") + @doc("The total amount of data in GiB of network reads across all Compute Nodes in the Pool.") networkReadGiB: float32; - @doc(""" -The total amount of data in GiB of network writes across all Compute Nodes in -the Pool. -""") + @doc("The total amount of data in GiB of network writes across all Compute Nodes in the Pool.") networkWriteGiB: float32; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("Resource usage statistics for a Job.") model JobStatistics { @doc("The URL of the statistics.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key url: string; @doc("The start time of the time range covered by the statistics.") startTime: utcDateTime; - @doc(""" -The time at which the statistics were last updated. All statistics are limited -to the range between startTime and lastUpdateTime. -""") + @doc("The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime.") lastUpdateTime: utcDateTime; - @doc(""" -The total user mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in the Job. -""") + @doc("The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job.") userCPUTime: duration; - @doc(""" -The total kernel mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in the Job. 
-""") + @doc("The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job.") kernelCPUTime: duration; - @doc(""" - The wall clock time is the elapsed time from when the Task started running on -a Compute Node to when it finished (or to the last time the statistics were -updated, if the Task had not finished by then). If a Task was retried, this -includes the wall clock time of all the Task retries. -""") + @doc("The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries.") wallClockTime: duration; @doc("The total number of disk read operations made by all Tasks in the Job.") @@ -947,28 +904,16 @@ includes the wall clock time of all the Task retries. @doc("The total amount of data in GiB written to disk by all Tasks in the Job.") writeIOGiB: float32; - @doc("A Task completes successfully if it returns exit code 0.") + @doc("The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0.") numSucceededTasks: int32; - @doc(""" -A Task fails if it exhausts its maximum retry count without returning exit code -0. -""") + @doc("The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0.") numFailedTasks: int32; - @doc(""" -The total number of retries on all the Tasks in the Job during the given time -range. -""") + @doc("The total number of retries on all the Tasks in the Job during the given time range.") numTaskRetries: int32; - @doc(""" -The wait time for a Task is defined as the elapsed time between the creation of -the Task and the start of Task execution. (If the Task is retried due to -failures, the wait time is the time to the most recent Task execution.) This -value is only reported in the Account lifetime statistics; it is not included -in the Job statistics. -""") + @doc("The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics.") waitTime: duration; } @@ -976,15 +921,12 @@ in the Job statistics. A Certificate that can be installed on Compute Nodes and can be used to authenticate operations on the machine. """) -model Certificate { - @doc(""" -The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex -digits. -""") - thumbprint?: string; +model BatchCertificate { + @doc("The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed).") + thumbprint: string; - @doc("The algorithm used to derive the thumbprint.") - thumbprintAlgorithm?: string; + @doc("The algorithm used to derive the thumbprint. This must be sha1.") + thumbprintAlgorithm: string; @doc("The URL of the Certificate.") @visibility("read") @@ -998,51 +940,44 @@ digits. 
@visibility("read") stateTransitionTime?: utcDateTime; - @doc("This property is not set if the Certificate is in its initial active state.") + @doc("The previous state of the Certificate. This property is not set if the Certificate is in its initial active state.") @visibility("read") previousState?: CertificateState; - @doc("This property is not set if the Certificate is in its initial Active state.") + @doc("The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state.") @visibility("read") previousStateTransitionTime?: utcDateTime; @doc("The public part of the Certificate as a base-64 encoded .cer file.") @visibility("read") - publicData?: string; + publicData?: bytes; - @doc("This property is set only if the Certificate is in the DeleteFailed state.") + @doc("The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state.") @visibility("read") deleteCertificateError?: DeleteCertificateError; @doc("The base64-encoded contents of the Certificate. The maximum size is 10KB.") - data?: string; + @visibility("create") + data: bytes; @doc("The format of the Certificate data.") + @visibility("create") certificateFormat?: CertificateFormat; - @doc("This must be omitted if the Certificate format is cer.") + @doc("The password to access the Certificate's private key. This must be omitted if the Certificate format is cer.") + @visibility("create") password?: string; } @doc("An error encountered by the Batch service when deleting a Certificate.") model DeleteCertificateError { - @doc(""" -An identifier for the Certificate deletion error. Codes are invariant and are -intended to be consumed programmatically. -""") + @doc("An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically.") code?: string; - @doc(""" -A message describing the Certificate deletion error, intended to be suitable -for display in a user interface. -""") + @doc("A message describing the Certificate deletion error, intended to be suitable for display in a user interface.") message?: string; - @doc(""" -This list includes details such as the active Pools and Compute Nodes -referencing this Certificate. However, if a large number of resources reference -the Certificate, the list contains only about the first hundred. -""") + @doc("A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. 
However, if a large number of resources reference the Certificate, the list contains only about the first hundred.") values?: NameValuePair[]; } @@ -1060,11 +995,12 @@ model NameValuePair { model CertificateListResult { @doc("The list of Certificates.") @items - value?: Certificate[]; + value?: BatchCertificate[]; + #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc(""" @@ -1077,9 +1013,10 @@ model NodeFileListResult { @items value?: NodeFile[]; + #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc("Information about a file or directory on a Compute Node.") @@ -1093,13 +1030,13 @@ model NodeFile { @doc("Whether the object represents a directory.") isDirectory?: boolean; - @doc("The properties of a file on a Compute Node.") + @doc("The file properties.") properties?: FileProperties; } @doc("The properties of a file on a Compute Node.") model FileProperties { - @doc("The creation time is not returned for files on Linux Compute Nodes.") + @doc("The file creation time. The creation time is not returned for files on Linux Compute Nodes.") creationTime?: utcDateTime; @doc("The time at which the file was last modified.") @@ -1111,7 +1048,7 @@ model FileProperties { @doc("The content type of the file.") contentType?: string; - @doc("The file mode is returned only for files on Linux Compute Nodes.") + @doc("The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes.") fileMode?: string; } @@ -1121,31 +1058,22 @@ specification used to create each Job. """) model BatchJobSchedule { @doc("A string that uniquely identifies the schedule within the Account.") - @visibility("read","create") + @visibility("read") id?: string; @doc("The display name for the schedule.") - @visibility("read","create") + @visibility("read") displayName?: string; @doc("The URL of the Job Schedule.") @visibility("read") url?: string; - @doc(""" -This is an opaque string. You can use it to detect whether the Job Schedule has -changed between requests. In particular, you can be pass the ETag with an -Update Job Schedule request to specify that your changes should take effect -only if nobody else has modified the schedule in the meantime. -""") + @doc("The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime.") @visibility("read") eTag?: string; - @doc(""" -This is the last time at which the schedule level data, such as the Job -specification or recurrence information, changed. It does not factor in -job-level changes such as new Jobs being created or Jobs changing state. -""") + @doc("The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed.
It does not factor in job-level changes such as new Jobs being created or Jobs changing state.") @visibility("read") lastModified?: utcDateTime; @@ -1153,7 +1081,7 @@ job-level changes such as new Jobs being created or Jobs changing state. @visibility("read") creationTime?: utcDateTime; - @doc("The state of the Job Schedule.") + @doc("The current state of the Job Schedule.") @visibility("read") state?: JobScheduleState; @@ -1161,240 +1089,149 @@ job-level changes such as new Jobs being created or Jobs changing state. @visibility("read") stateTransitionTime?: utcDateTime; - @doc("This property is not present if the Job Schedule is in its initial active state.") + @doc("The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state.") @visibility("read") previousState?: JobScheduleState; - @doc("This property is not present if the Job Schedule is in its initial active state.") + @doc("The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state.") @visibility("read") previousStateTransitionTime?: utcDateTime; - @doc(""" -All times are fixed respective to UTC and are not impacted by daylight saving -time. -""") - schedule?: Schedule; + @doc("The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time.") + schedule: Schedule; - @doc("Specifies details of the Jobs to be created on a schedule.") - jobSpecification?: JobSpecification; + @doc("The details of the Jobs to be created on this schedule.") + jobSpecification: JobSpecification; - @doc(""" -Contains information about Jobs that have been and will be run under a Job -Schedule. -""") + @doc("Information about Jobs that have been and will be run under this schedule.") @visibility("read") executionInfo?: JobScheduleExecutionInformation; - @doc(""" -The Batch service does not assign any meaning to metadata; it is solely for the -use of user code. -""") + @doc("A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.") metadata?: MetadataItem[]; - @doc("Resource usage statistics for a Job Schedule.") + @doc("The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.") @visibility("read") stats?: JobScheduleStatistics; } +@doc(""" +Options for updating an Azure Batch Job Schedule. +""") +model BatchJobScheduleUpdateOptions { + @doc("The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged.") + schedule?: Schedule; + + @doc("The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification.") + jobSpecification?: JobSpecification; + + @doc("A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged.") + metadata?: MetadataItem[]; +} + +@doc(""" +Options for creating an Azure Batch Job Schedule +""") +model BatchJobScheduleCreateOptions { + @doc("A string that uniquely identifies the schedule within the Account. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).") + id: string; + + @doc("The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.") + displayName?: string; + + @doc("The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time.") + schedule: Schedule; + + @doc("The details of the Jobs to be created on this schedule.") + jobSpecification: JobSpecification; + + @doc("A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.") + metadata?: MetadataItem[]; +} + @doc(""" The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. """) model Schedule { - @doc(""" -If you do not specify a doNotRunUntil time, the schedule becomes ready to -create Jobs immediately. -""") + @doc("The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately.") doNotRunUntil?: utcDateTime; - @doc(""" -If you do not specify a doNotRunAfter time, and you are creating a recurring -Job Schedule, the Job Schedule will remain active until you explicitly -terminate it. -""") + @doc("A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it.") doNotRunAfter?: utcDateTime; - @doc(""" -If a Job is not created within the startWindow interval, then the 'opportunity' -is lost; no Job will be created until the next recurrence of the schedule. If -the schedule is recurring, and the startWindow is longer than the recurrence -interval, then this is equivalent to an infinite startWindow, because the Job -that is 'due' in one recurrenceInterval is not carried forward into the next -recurrence interval. The default is infinite. The minimum value is 1 minute. If -you specify a lower value, the Batch service rejects the schedule with an -error; if you are calling the REST API directly, the HTTP status code is 400 -(Bad Request). -""") + @doc("The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. 
If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).")
  startWindow?: duration;

-  @doc("""
-Because a Job Schedule can have at most one active Job under it at any given
-time, if it is time to create a new Job under a Job Schedule, but the previous
-Job is still running, the Batch service will not create the new Job until the
-previous Job finishes. If the previous Job does not finish within the
-startWindow period of the new recurrenceInterval, then no new Job will be
-scheduled for that interval. For recurring Jobs, you should normally specify a
-jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you
-will need an external process to monitor when Jobs are created, add Tasks to
-the Jobs and terminate the Jobs ready for the next recurrence. The default is
-that the schedule does not recur: one Job is created, within the startWindow
-after the doNotRunUntil time, and the schedule is complete as soon as that Job
-finishes. The minimum value is 1 minute. If you specify a lower value, the
-Batch service rejects the schedule with an error; if you are calling the REST
-API directly, the HTTP status code is 400 (Bad Request).
-""")
+  @doc("The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time; because of this, if it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).")
  recurrenceInterval?: duration;
}
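For orientation on how doNotRunUntil, startWindow, and recurrenceInterval interact, here is a hedged sketch of a client-side payload whose property names mirror the Schedule model above; the variable name and the surrounding client shape are illustrative assumptions, not part of this diff.

  // Hypothetical TypeScript payload for the Schedule model above.
  // Durations are ISO 8601 strings, as in the Batch REST API.
  const weeklySchedule = {
    // doNotRunUntil omitted: the schedule is ready to create Jobs immediately.
    doNotRunAfter: "2025-12-31T00:00:00Z", // schedule completes once this deadline passes
    startWindow: "PT30M", // a missed creation 'opportunity' lapses after 30 minutes
    recurrenceInterval: "P7D" // one Job per week; at most one active Job at a time
  };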
@doc("Specifies details of the Jobs to be created on a schedule.")
model JobSpecification {
-  @doc("""
-Priority values can range from -1000 to 1000, with -1000 being the lowest
-priority and 1000 being the highest priority. The default value is 0. This
-priority is used as the default for all Jobs under the Job Schedule. You can
-update a Job's priority after it has been created using by using the update Job
-API.
-""")
+  @doc("The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created by using the update Job API.")
  priority?: int32;

-  @doc("""
-If the value is set to True, other high priority jobs submitted to the system
-will take precedence and will be able requeue tasks from this job. You can
-update a job's allowTaskPreemption after it has been created using the update
-job API.
-""")
+  @doc("Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API.")
  allowTaskPreemption?: boolean;

-  @doc("""
-The value of maxParallelTasks must be -1 or greater than 0 if specified. If not
-specified, the default value is -1, which means there's no limit to the number
-of tasks that can be run at once. You can update a job's maxParallelTasks after
-it has been created using the update job API.
-""")
+  @doc("The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API.")
  maxParallelTasks?: int32;

-  @doc("""
-The name need not be unique and can contain any Unicode characters up to a
-maximum length of 1024.
-""")
+  @doc("The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024.")
  displayName?: string;

-  @doc("""
-Whether Tasks in the Job can define dependencies on each other. The default is
-false.
-""")
+  @doc("Whether Tasks in the Job can define dependencies on each other. The default is false.")
  usesTaskDependencies?: boolean;

-  @doc("""
-Note that if a Job contains no Tasks, then all Tasks are considered complete.
-This option is therefore most commonly used with a Job Manager task; if you
-want to use automatic Job termination without a Job Manager, you should
-initially set onAllTasksComplete to noaction and update the Job properties to
-set onAllTasksComplete to terminatejob once you have finished adding Tasks. The
-default is noaction.
-""")
+  @doc("The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction.")
  onAllTasksComplete?: OnAllTasksComplete;

-  @doc("The default is noaction.")
+  @doc("The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.")
  onTaskFailure?: OnTaskFailure;

  @doc("The network configuration for the Job.")
  networkConfiguration?: JobNetworkConfiguration;

-  @doc("The execution constraints for a Job.")
+  @doc("The execution constraints for Jobs created under this schedule.")
  constraints?: JobConstraints;

-  @doc("""
-If the Job does not specify a Job Manager Task, the user must explicitly add
-Tasks to the Job using the Task API. 
If the Job does specify a Job Manager -Task, the Batch service creates the Job Manager Task when the Job is created, -and will try to schedule the Job Manager Task before scheduling other Tasks in -the Job. -""") + @doc("The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job.") jobManagerTask?: JobManagerTask; - @doc(""" -If a Job has a Job Preparation Task, the Batch service will run the Job -Preparation Task on a Node before starting any Tasks of that Job on that -Compute Node. -""") + @doc("The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node.") jobPreparationTask?: JobPreparationTask; - @doc(""" -The primary purpose of the Job Release Task is to undo changes to Nodes made by -the Job Preparation Task. Example activities include deleting local files, or -shutting down services that were started as part of Job preparation. A Job -Release Task cannot be specified without also specifying a Job Preparation Task -for the Job. The Batch service runs the Job Release Task on the Compute Nodes -that have run the Job Preparation Task. -""") + @doc("The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task.") jobReleaseTask?: JobReleaseTask; - @doc(""" -Individual Tasks can override an environment setting specified here by -specifying the same setting name with a different value. -""") + @doc("A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.") commonEnvironmentSettings?: EnvironmentSetting[]; - @doc("Specifies how a Job should be assigned to a Pool.") + @doc("The Pool on which the Batch service runs the Tasks of Jobs created under this schedule.") poolInfo: PoolInformation; - @doc(""" -The Batch service does not assign any meaning to metadata; it is solely for the -use of user code. -""") + @doc("A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.") metadata?: MetadataItem[]; } @doc("The network configuration for the Job.") model JobNetworkConfiguration { - @doc(""" -The virtual network must be in the same region and subscription as the Azure -Batch Account. The specified subnet should have enough free IP addresses to -accommodate the number of Compute Nodes which will run Tasks from the Job. This -can be up to the number of Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' -service principal must have the 'Classic Virtual Machine Contributor' -Role-Based Access Control (RBAC) role for the specified VNet so that Azure -Batch service can schedule Tasks on the Nodes. This can be verified by checking -if the specified VNet has any associated Network Security Groups (NSG). If -communication to the Nodes in the specified subnet is denied by an NSG, then -the Batch service will set the state of the Compute Nodes to unusable. This is -of the form -/subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. -If the specified VNet has any associated Network Security Groups (NSG), then a -few reserved system ports must be enabled for inbound communication from the -Azure Batch service. For Pools created with a Virtual Machine configuration, -enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for -Windows. Port 443 is also required to be open for outbound connections for -communications to Azure Storage. For more details see: -https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration -""") + @doc("The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.") subnetId: string; } @doc("The execution constraints for a Job.") model JobConstraints { - @doc(""" -If the Job does not complete within the time limit, the Batch service -terminates it and any Tasks that are still running. In this case, the -termination reason will be MaxWallClockTimeExpiry. If this property is not -specified, there is no time limit on how long the Job may run. -""") + @doc("The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. 
If this property is not specified, there is no time limit on how long the Job may run.") maxWallClockTime?: duration; - @doc(""" -Note that this value specifically controls the number of retries. The Batch -service will try each Task once, and may then retry up to this limit. For -example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one -initial try and 3 retries). If the maximum retry count is 0, the Batch service -does not retry Tasks. If the maximum retry count is -1, the Batch service -retries the Task without limit, however this is not recommended for a start -task or any task. The default value is 0 (no retries) -""") + @doc("The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries).") maxTaskRetryCount?: int32; } @doc(""" +Specifies details of a Job Manager Task. The Job Manager Task is automatically started when the Job is created. The Batch service tries to schedule the Job Manager Task before any other Tasks in the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where @@ -1419,143 +1256,73 @@ duplicate data. The best practice for long running Tasks is to use some form of checkpointing. """) model JobManagerTask { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores and cannot contain more than 64 characters. -""") + @doc("A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters.") id: string; - @doc(""" -It need not be unique and can contain any Unicode characters up to a maximum -length of 1024. -""") + @doc("The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024.") displayName?: string; - @doc(""" -The command line does not run under a shell, and therefore cannot take -advantage of shell features such as environment variable expansion. If you want -to take advantage of such features, you should invoke the shell in the command -line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c -MyCommand\" in Linux. If the command line refers to file paths, it should use a -relative path (relative to the Task working directory), or use the Batch -provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). -""") + @doc("The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).") commandLine: string; - @doc(""" -If the Pool that will run this Task has containerConfiguration set, this must -be set as well. If the Pool that will run this Task doesn't have -containerConfiguration set, this must not be set. When this is specified, all -directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure -Batch directories on the node) are mapped into the container, all Task -environment variables are mapped into the container, and the Task command line -is executed in the container. Files produced in the container outside of -AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that -Batch file APIs will not be able to access those files. -""") + @doc("The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.") containerSettings?: TaskContainerSettings; - @doc(""" -Files listed under this element are located in the Task's working directory. -There is a maximum size for the list of resource files. When the max size is -exceeded, the request will fail and the response error code will be -RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be -reduced in size. This can be achieved using .zip files, Application Packages, -or Docker Containers. -""") + @doc("A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.") resourceFiles?: ResourceFile[]; - @doc(""" -For multi-instance Tasks, the files will only be uploaded from the Compute Node -on which the primary Task is executed. -""") + @doc("A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.") outputFiles?: OutputFile[]; @doc("A list of environment variable settings for the Job Manager Task.") environmentSettings?: EnvironmentSetting[]; - @doc("Execution constraints to apply to a Task.") + @doc("Constraints that apply to the Job Manager Task.") constraints?: TaskConstraints; - @doc(""" -The default is 1. A Task can only be scheduled to run on a compute node if the -node has enough free scheduling slots available. 
For multi-instance Tasks, this -property is not supported and must not be specified. -""") + @doc("The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified.") requiredSlots?: int32; - @doc(""" -If true, when the Job Manager Task completes, the Batch service marks the Job -as complete. If any Tasks are still running at this time (other than Job -Release), those Tasks are terminated. If false, the completion of the Job -Manager Task does not affect the Job status. In this case, you should either -use the onAllTasksComplete attribute to terminate the Job, or have a client or -user terminate the Job explicitly. An example of this is if the Job Manager -creates a set of Tasks but then takes no further role in their execution. The -default value is true. If you are using the onAllTasksComplete and -onTaskFailure attributes to control Job lifetime, and using the Job Manager -Task only to create the Tasks for the Job (not to monitor progress), then it is -important to set killJobOnCompletion to false. -""") + @doc("Whether completion of the Job Manager Task signifies completion of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false.") killJobOnCompletion?: boolean; - @doc("If omitted, the Task runs as a non-administrative user unique to the Task.") + @doc("The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task.") userIdentity?: UserIdentity; - @doc(""" -If true, no other Tasks will run on the same Node for as long as the Job -Manager is running. If false, other Tasks can run simultaneously with the Job -Manager on a Compute Node. The Job Manager Task counts normally against the -Compute Node's concurrent Task limit, so this is only relevant if the Compute -Node allows multiple concurrent Tasks. The default value is true. -""") + @doc("Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true.") runExclusive?: boolean; @doc(""" -Application Packages are downloaded and deployed to a shared directory, not the -Task working directory. 
Therefore, if a referenced Application Package is
-already on the Compute Node, and is up to date, then it is not re-downloaded;
+A list of Application Packages that the Batch service will deploy to the
+Compute Node before running the command line. Application Packages are
+downloaded and deployed to a shared directory, not the Task working
+directory. Therefore, if a referenced Application Package is already
+on the Compute Node, and is up to date, then it is not re-downloaded;
the existing copy on the Compute Node is used. If a referenced Application
Package cannot be installed, for example because the package has been deleted
or because download failed, the Task fails.
""")
  applicationPackageReferences?: ApplicationPackageReference[];

-  @doc("""
-If this property is set, the Batch service provides the Task with an
-authentication token which can be used to authenticate Batch service operations
-without requiring an Account access key. The token is provided via the
-AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the
-Task can carry out using the token depend on the settings. For example, a Task
-can request Job permissions in order to add other Tasks to the Job, or check
-the status of the Job or of other Tasks under the Job.
-""")
+  @doc("The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job.")
  authenticationTokenSettings?: AuthenticationTokenSettings;

-  @doc("The default value is true.")
+  @doc("Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true.")
  allowLowPriorityNode?: boolean;
}
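To ground the killJobOnCompletion and runExclusive semantics described above, here is a hedged sketch of a minimal client-side Job Manager Task payload; the property names mirror the JobManagerTask model, while the variable name, command line, and surrounding client shape are illustrative assumptions, not part of this diff.

  // Hypothetical TypeScript payload for the JobManagerTask model above.
  const jobManagerTask = {
    id: "jobmanager", // unique within the Job, at most 64 characters
    commandLine: "/bin/sh -c 'python3 submit_tasks.py'", // invoke a shell explicitly to get shell features
    // This Job Manager only creates Tasks, so the Job's lifetime is driven by
    // onAllTasksComplete rather than by the Job Manager's own completion:
    killJobOnCompletion: false,
    runExclusive: false // let other Tasks share the Compute Node while it runs
  };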
@doc("The container settings for a Task.")
model TaskContainerSettings {
-  @doc("""
-These additional options are supplied as arguments to the \"docker create\"
-command, in addition to those controlled by the Batch Service.
-""")
+  @doc("Additional options to the container create command. These additional options are supplied as arguments to the \"docker create\" command, in addition to those controlled by the Batch Service.")
  containerRunOptions?: string;

-  @doc("""
-This is the full Image reference, as would be specified to \"docker pull\". If
-no tag is provided as part of the Image name, the tag \":latest\" is used as a
-default.
-""")
+  @doc("The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to \"docker pull\". If no tag is provided as part of the Image name, the tag \":latest\" is used as a default.")
  imageName: string;

-  @doc("This setting can be omitted if was already provided at Pool creation.")
+  @doc("The private registry which contains the container Image. This setting can be omitted if it was already provided at Pool creation.")
  registry?: ContainerRegistry;

-  @doc("The default is 'taskWorkingDirectory'.")
+  @doc("The location of the container Task working directory. The default is 'taskWorkingDirectory'.")
  workingDirectory?: ContainerWorkingDirectory;
}

@@ -1567,174 +1334,96 @@ model ContainerRegistry {
  @doc("The password to log into the registry server.")
  password?: string;

-  @doc("If omitted, the default is \"docker.io\".")
+  @doc("The registry URL. If omitted, the default is \"docker.io\".")
  registryServer?: string;

-  @doc("""
-The reference to a user assigned identity associated with the Batch pool which
-a compute node will use.
-""")
-  identityReference?: ComputeNodeIdentityReference;
+  @doc("The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password.")
+  identityReference?: BatchNodeIdentityReference;
}

@doc("""
The reference to a user assigned identity associated with the Batch pool which
a compute node will use.
""")
-model ComputeNodeIdentityReference {
+model BatchNodeIdentityReference {
  @doc("The ARM resource id of the user assigned identity.")
  resourceId?: string;
}

@doc("A single file or multiple files to be downloaded to a Compute Node.")
model ResourceFile {
-  @doc("""
-The autoStorageContainerName, storageContainerUrl and httpUrl properties are
-mutually exclusive and one of them must be specified.
-""")
+  @doc("The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified.")
  autoStorageContainerName?: string;

-  @doc("""
-The autoStorageContainerName, storageContainerUrl and httpUrl properties are
-mutually exclusive and one of them must be specified. This URL must be readable
-and listable from compute nodes. There are three ways to get such a URL for a
-container in Azure storage: include a Shared Access Signature (SAS) granting
-read and list permissions on the container, use a managed identity with read
-and list permissions, or set the ACL for the container to allow public access.
-""")
+  @doc("The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access.")
  storageContainerUrl?: string;

-  @doc("""
-The autoStorageContainerName, storageContainerUrl and httpUrl properties are
-mutually exclusive and one of them must be specified. If the URL points to
-Azure Blob Storage, it must be readable from compute nodes. There are three
-ways to get such a URL for a blob in Azure storage: include a Shared Access
-Signature (SAS) granting read permissions on the blob, use a managed identity
-with read permission, or set the ACL for the blob or its container to allow
-public access.
-""")
+  @doc("The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. 
There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access.") httpUrl?: string; - @doc(""" -The property is valid only when autoStorageContainerName or storageContainerUrl -is used. This prefix can be a partial filename or a subdirectory. If a prefix -is not specified, all the files in the container will be downloaded. -""") + @doc("The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded.") blobPrefix?: string; - @doc(""" -If the httpUrl property is specified, the filePath is required and describes -the path which the file will be downloaded to, including the filename. -Otherwise, if the autoStorageContainerName or storageContainerUrl property is -specified, filePath is optional and is the directory to download the files to. -In the case where filePath is used as a directory, any directory structure -already associated with the input data will be retained in full and appended to -the specified filePath directory. The specified relative path cannot break out -of the Task's working directory (for example by using '..'). -""") + @doc("The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..').") filePath?: string; - @doc(""" -This property applies only to files being downloaded to Linux Compute Nodes. It -will be ignored if it is specified for a resourceFile which will be downloaded -to a Windows Compute Node. If this property is not specified for a Linux -Compute Node, then a default value of 0770 is applied to the file. -""") + @doc("The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file.") fileMode?: string; - @doc(""" -The reference to a user assigned identity associated with the Batch pool which -a compute node will use. -""") - identityReference?: ComputeNodeIdentityReference; + @doc("The reference to the user assigned identity to use to access Azure Blob Storage specified by storageContainerUrl or httpUrl.") + identityReference?: BatchNodeIdentityReference; } @doc(""" -On every file uploads, Batch service writes two log files to the compute node, -'fileuploadout.txt' and 'fileuploaderr.txt'. 
These log files are used to learn
-more about a specific failure.
+On every file upload, the Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure.
""")
model OutputFile {
-  @doc("""
-Both relative and absolute paths are supported. Relative paths are relative to
-the Task working directory. The following wildcards are supported: * matches 0
-or more characters (for example pattern abc* would match abc or abcdef), **
-matches any directory, ? matches any single character, [abc] matches one
-character in the brackets, and [a-c] matches one character in the range.
-Brackets can include a negation to match any character not specified (for
-example [!abc] matches any character but a, b, or c). If a file name starts
-with \".\" it is ignored by default but may be matched by specifying it
-explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple
-example: **\\*.txt matches any file that does not start in '.' and ends with
-.txt in the Task working directory or any subdirectory. If the filename
-contains a wildcard character it can be escaped using brackets (for example
-abc[*] would match a file named abc*). Note that both \\ and / are treated as
-directory separators on Windows, but only / is on Linux. Environment variables
-(%var% on Windows or $var on Linux) are expanded prior to the pattern being
-applied.
-""")
+  @doc("A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range.")
  filePattern: string;

-  @doc("The destination to which a file should be uploaded.")
+  @doc("The destination for the output file(s).")
  destination: OutputFileDestination;

-  @doc("""
-Details about an output file upload operation, including under what conditions
-to perform the upload.
-""")
+  @doc("Additional options for the upload operation, including under what conditions to perform the upload.")
  uploadOptions: OutputFileUploadOptions;
}

@doc("The destination to which a file should be uploaded.")
model OutputFileDestination {
-  @doc("Specifies a file upload destination within an Azure blob storage container.")
+  @doc("A location in Azure blob storage to which files are uploaded.")
  container?: OutputFileBlobContainerDestination;
}
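The filePattern wildcards and the container destination above combine as in the following hedged sketch; property names mirror the OutputFile model, while the variable name and the placeholder container URL are illustrative assumptions, not values from this diff.

  // Hypothetical TypeScript payload for the OutputFile model above.
  const logUpload = {
    filePattern: "../std*.txt", // relative to the Task working directory; matches stdout.txt and stderr.txt
    destination: {
      container: {
        path: "taskLogs", // treated as a blob virtual directory because the pattern has wildcards
        containerUrl: "https://<account>.blob.core.windows.net/output?<SAS>" // placeholder SAS URL with write permissions
      }
    },
    uploadOptions: { uploadCondition: "taskcompletion" } // upload whether the Task succeeds or fails
  };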
@doc("Specifies a file upload destination within an Azure blob storage container.")
model OutputFileBlobContainerDestination {
-  @doc("""
-If filePattern refers to a specific file (i.e. contains no wildcards), then
-path is the name of the blob to which to upload that file. If filePattern
-contains one or more wildcards (and therefore may match multiple files), then
-path is the name of the blob virtual directory (which is prepended to each blob
-name) to which to upload the file(s). If omitted, file(s) are uploaded to the
-root of the container with a blob name matching their file name.
-""")
+  @doc("The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name.")
  path?: string;

-  @doc("""
-If not using a managed identity, the URL must include a Shared Access Signature
-(SAS) granting write permissions to the container.
-""")
+  @doc("The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container.")
  containerUrl: string;

-  @doc("The identity must have write access to the Azure Blob Storage container")
-  identityReference?: ComputeNodeIdentityReference;
+  @doc("The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container.")
+  identityReference?: BatchNodeIdentityReference;

-  @doc("""
-These headers will be specified when uploading files to Azure Storage. Official
-document on allowed headers when uploading blobs:
-https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types
-""")
+  @doc("A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types.")
  uploadHeaders?: HttpHeader[];
}

@doc("An HTTP header name-value pair")
model HttpHeader {
-  @doc("The case-insensitive name of the header to be used while uploading output files")
+  @doc("The case-insensitive name of the header to be used while uploading output files.")
  name: string;

-  @doc("The value of the header to be used while uploading output files")
+  @doc("The value of the header to be used while uploading output files.")
  value?: string;
}

@doc("""
-Details about an output file upload operation, including under what conditions
+Options for an output file upload operation, including under what conditions
to perform the upload.
""")
model OutputFileUploadOptions {
-  @doc("The default is taskcompletion.")
+  @doc("The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion.")
  uploadCondition: OutputFileUploadCondition;
}

@@ -1749,76 +1438,42 @@ model EnvironmentSetting {
@doc("Execution constraints to apply to a Task.")
model TaskConstraints {
-  @doc("If this is not specified, there is no time limit on how long the Task may run.")
+  @doc("The maximum elapsed time that the Task may run, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run.")
  maxWallClockTime?: duration;

-  @doc("""
-The default is 7 days, i.e. the Task directory will be retained for 7 days
-unless the Compute Node is removed or the Job is deleted.
-""")
+  @doc("The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. 
the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted.")
  retentionTime?: duration;

-  @doc("""
-Note that this value specifically controls the number of retries for the Task
-executable due to a nonzero exit code. The Batch service will try the Task
-once, and may then retry up to this limit. For example, if the maximum retry
-count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries).
-If the maximum retry count is 0, the Batch service does not retry the Task
-after the first attempt. If the maximum retry count is -1, the Batch service
-retries the Task without limit, however this is not recommended for a start
-task or any task. The default value is 0 (no retries)
-""")
+  @doc("The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit; however, this is not recommended for a start task or any task. The default value is 0 (no retries).")
  maxTaskRetryCount?: int32;
}

-@doc("Specify either the userName or autoUser property, but not both.")
+@doc("The definition of the user identity under which the Task is run. Specify either the userName or autoUser property, but not both.")
model UserIdentity {
-  @doc("""
-The userName and autoUser properties are mutually exclusive; you must specify
-one but not both.
-""")
+  @doc("The name of the user identity under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both.")
  username?: string;

-  @doc("""
-The userName and autoUser properties are mutually exclusive; you must specify
-one but not both.
-""")
+  @doc("The auto user under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both.")
  autoUser?: AutoUserSpecification;
}

@doc("""
-Specifies the parameters for the auto user that runs a Task on the Batch
-service.
+Specifies the options for the auto user that runs an Azure Batch Task.
""")
model AutoUserSpecification {
-  @doc("""
-The default value is pool. If the pool is running Windows a value of Task
-should be specified if stricter isolation between tasks is required. For
-example, if the task mutates the registry in a way which could impact other
-tasks, or if certificates have been specified on the pool which should not be
-accessible by normal tasks but should be accessible by StartTasks.
-""")
+  @doc("The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks.")
  scope?: AutoUserScope;

-  @doc("The default value is nonAdmin.")
+  @doc("The elevation level of the auto user. 
The default value is nonAdmin.") elevationLevel?: ElevationLevel; } @doc("A reference to an Package to be deployed to Compute Nodes.") model ApplicationPackageReference { - @doc(""" -When creating a pool, the package's application ID must be fully qualified -(/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). -""") + @doc("The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}).") applicationId: string; - @doc(""" -If this is omitted on a Pool, and no default version is specified for this -application, the request fails with the error code -InvalidApplicationPackageReferences and HTTP status code 409. If this is -omitted on a Task, and no default version is specified for this application, -the Task fails with a pre-processing error. -""") + @doc("The version of the application to deploy. If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error.") version?: string; } @@ -1827,16 +1482,12 @@ The settings for an authentication token that the Task can use to perform Batch service operations. """) model AuthenticationTokenSettings { - @doc(""" -The authentication token grants access to a limited set of Batch service -operations. Currently the only supported value for the access property is -'job', which grants access to all operations related to the Job which contains -the Task. -""") + @doc("The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task.") access?: AccessScope[]; } @doc(""" +A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. You can use Job Preparation to prepare a Node to run Tasks for the Job. Activities commonly performed in Job Preparation include: Downloading common resource files used by all the Tasks in the Job. The Job Preparation Task can @@ -1864,88 +1515,36 @@ without causing any corruption or duplicate data. The best practice for long running Tasks is to use some form of checkpointing. """) model JobPreparationTask { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores and cannot contain more than 64 characters. If you do not -specify this property, the Batch service assigns a default value of -'jobpreparation'. No other Task in the Job can have the same ID as the Job -Preparation Task. If you try to submit a Task with the same id, the Batch -service rejects the request with error code TaskIdSameAsJobPreparationTask; if -you are calling the REST API directly, the HTTP status code is 409 (Conflict). -""") + @doc("A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. 
If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict).") id?: string; - @doc(""" -The command line does not run under a shell, and therefore cannot take -advantage of shell features such as environment variable expansion. If you want -to take advantage of such features, you should invoke the shell in the command -line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c -MyCommand\" in Linux. If the command line refers to file paths, it should use a -relative path (relative to the Task working directory), or use the Batch -provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). -""") + @doc("The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).") commandLine: string; - @doc(""" -When this is specified, all directories recursively below the -AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are -mapped into the container, all Task environment variables are mapped into the -container, and the Task command line is executed in the container. Files -produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be -reflected to the host disk, meaning that Batch file APIs will not be able to -access those files. -""") + @doc("The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.") containerSettings?: TaskContainerSettings; - @doc(""" -Files listed under this element are located in the Task's working directory. -There is a maximum size for the list of resource files. When the max size is -exceeded, the request will fail and the response error code will be -RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be -reduced in size. This can be achieved using .zip files, Application Packages, -or Docker Containers. -""") + @doc("A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. 
If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.")
  resourceFiles?: ResourceFile[];

  @doc("A list of environment variable settings for the Job Preparation Task.")
  environmentSettings?: EnvironmentSetting[];

-  @doc("Execution constraints to apply to a Task.")
+  @doc("Constraints that apply to the Job Preparation Task.")
  constraints?: TaskConstraints;

-  @doc("""
-If true and the Job Preparation Task fails on a Node, the Batch service retries
-the Job Preparation Task up to its maximum retry count (as specified in the
-constraints element). If the Task has still not completed successfully after
-all retries, then the Batch service will not schedule Tasks of the Job to the
-Node. The Node remains active and eligible to run Tasks of other Jobs. If
-false, the Batch service will not wait for the Job Preparation Task to
-complete. In this case, other Tasks of the Job can start executing on the
-Compute Node while the Job Preparation Task is still running; and even if the
-Job Preparation Task fails, new Tasks will continue to be scheduled on the
-Compute Node. The default value is true.
-""")
+  @doc("Whether the Batch service should wait for the Job Preparation Task to complete successfully before scheduling any other Tasks of the Job on the Compute Node. A Job Preparation Task has completed successfully if it exits with exit code 0. If true and the Job Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as specified in the constraints element). If the Task has still not completed successfully after all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start executing on the Compute Node while the Job Preparation Task is still running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The default value is true.")
  waitForSuccess?: boolean;

-  @doc("""
-If omitted, the Task runs as a non-administrative user unique to the Task on
-Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux
-Compute Nodes.
-""")
+  @doc("The user identity under which the Job Preparation Task runs. If omitted, the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux Compute Nodes.")
  userIdentity?: UserIdentity;

-  @doc("""
-The Job Preparation Task is always rerun if a Compute Node is reimaged, or if
-the Job Preparation Task did not complete (e.g. because the reboot occurred
-while the Task was running). Therefore, you should always write a Job
-Preparation Task to be idempotent and to behave correctly if run multiple
-times. The default value is true.
-""")
+  @doc("Whether the Batch service should rerun the Job Preparation Task after a Compute Node reboots. The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the reboot occurred while the Task was running). Therefore, you should always write a Job Preparation Task to be idempotent and to behave correctly if run multiple times. The default value is true.")
  rerunOnNodeRebootAfterSuccess?: boolean;
}

@doc("""
+A Job Release Task to run on Job completion on any Compute Node where the Job has run.
The Job Release Task runs when the Job ends, because of one of the following:
The user calls the Terminate Job API, or the Delete Job API while the Job is
still active, the Job's maximum wall clock time constraint is reached, and the
@@ -1963,87 +1562,37 @@ scheduling slot; that is, it does not count towards the taskSlotsPerNode limit
specified on the Pool.
""")
model JobReleaseTask {
-  @doc("""
-The ID can contain any combination of alphanumeric characters including hyphens
-and underscores and cannot contain more than 64 characters. If you do not
-specify this property, the Batch service assigns a default value of
-'jobrelease'. No other Task in the Job can have the same ID as the Job Release
-Task. If you try to submit a Task with the same id, the Batch service rejects
-the request with error code TaskIdSameAsJobReleaseTask; if you are calling the
-REST API directly, the HTTP status code is 409 (Conflict).
-""")
+  @doc("A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict).")
  id?: string;

-  @doc("""
-The command line does not run under a shell, and therefore cannot take
-advantage of shell features such as environment variable expansion. If you want
-to take advantage of such features, you should invoke the shell in the command
-line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c
-MyCommand\" in Linux. If the command line refers to file paths, it should use a
-relative path (relative to the Task working directory), or use the Batch
-provided environment variable
-(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
-""")
+  @doc("The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).")
  commandLine: string;

-  @doc("""
-When this is specified, all directories recursively below the
-AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are
-mapped into the container, all Task environment variables are mapped into the
-container, and the Task command line is executed in the container. Files
-produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be
-reflected to the host disk, meaning that Batch file APIs will not be able to
-access those files.
-""")
+  @doc("The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.")
  containerSettings?: TaskContainerSettings;

-  @doc("Files listed under this element are located in the Task's working directory.")
+  @doc("A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory.")
  resourceFiles?: ResourceFile[];

  @doc("A list of environment variable settings for the Job Release Task.")
  environmentSettings?: EnvironmentSetting[];

-  @doc("""
-The maximum elapsed time that the Job Release Task may run on a given Compute
-Node, measured from the time the Task starts. If the Task does not complete
-within the time limit, the Batch service terminates it. The default value is 15
-minutes. You may not specify a timeout longer than 15 minutes. If you do, the
-Batch service rejects it with an error; if you are calling the REST API
-directly, the HTTP status code is 400 (Bad Request).
-""")
+  @doc("The maximum elapsed time that the Job Release Task may run on a given Compute Node, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. The default value is 15 minutes. You may not specify a timeout longer than 15 minutes. If you do, the Batch service rejects it with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).")
  maxWallClockTime?: duration;

-  @doc("""
-The default is 7 days, i.e. the Task directory will be retained for 7 days
-unless the Compute Node is removed or the Job is deleted.
-""")
+  @doc("The minimum time to retain the Task directory for the Job Release Task on the Compute Node. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted.")
  retentionTime?: duration;

-  @doc("If omitted, the Task runs as a non-administrative user unique to the Task.")
+  @doc("The user identity under which the Job Release Task runs. If omitted, the Task runs as a non-administrative user unique to the Task.")
  userIdentity?: UserIdentity;
}

@doc("Specifies how a Job should be assigned to a Pool.")
model PoolInformation {
-  @doc("""
-You must ensure that the Pool referenced by this property exists. If the Pool
-does not exist at the time the Batch service tries to schedule a Job, no Tasks
-for the Job will run until you create a Pool with that id. Note that the Batch
-service will not reject the Job request; it will simply not run Tasks until the
-Pool exists. You must specify either the Pool ID or the auto Pool
-specification, but not both.
-""")
+  @doc("The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both.")
  poolId?: string;

-  @doc("""
-If auto Pool creation fails, the Batch service moves the Job to a completed
-state, and the Pool creation error is set in the Job's scheduling error
-property. The Batch service manages the lifetime (both creation and, unless
-keepAlive is specified, deletion) of the auto Pool. Any user actions that
-affect the lifetime of the auto Pool while the Job is active will result in
-unexpected behavior. You must specify either the Pool ID or the auto Pool
-specification, but not both.
-""")
+  @doc("Characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. If auto Pool creation fails, the Batch service moves the Job to a completed state, and the Pool creation error is set in the Job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool specification, but not both.")
  autoPoolSpecification?: AutoPoolSpecification;
}
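The poolId / autoPoolSpecification exclusivity described above, as a minimal TypeScript sketch; every name and value here is invented for illustration and is not part of this change:

// Option 1: run the Job on an existing Pool. The Pool must already exist;
// otherwise the Job's Tasks simply wait until a Pool with this ID is created.
const poolInfoExisting = {
  poolId: "my-existing-pool"
};

// Option 2: let the Batch service create a temporary auto Pool (see the
// AutoPoolSpecification model just below). Supplying both poolId and
// autoPoolSpecification in one PoolInformation is rejected.
const poolInfoAuto = {
  autoPoolSpecification: {
    autoPoolIdPrefix: "nightly", // at most 20 characters
    poolLifetimeOption: "job",
    keepAlive: false, // delete the Pool when its lifetime expires
    pool: { vmSize: "standard_d2s_v3" }
  }
};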
@@ -2052,193 +1601,89 @@ Specifies characteristics for a temporary 'auto pool'. The Batch service will
create this auto Pool when the Job is submitted.
""")
model AutoPoolSpecification {
-  @doc("""
-The Batch service assigns each auto Pool a unique identifier on creation. To
-distinguish between Pools created for different purposes, you can specify this
-element to add a prefix to the ID that is assigned. The prefix can be up to 20
-characters long.
-""")
+  @doc("A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long.")
  autoPoolIdPrefix?: string;

-  @doc("""
-The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule
-are assigned to Pools.
-""")
+  @doc("The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools.")
  poolLifetimeOption: PoolLifetimeOption;

-  @doc("""
-If false, the Batch service deletes the Pool once its lifetime (as determined
-by the poolLifetimeOption setting) expires; that is, when the Job or Job
-Schedule completes. If true, the Batch service does not delete the Pool
-automatically. It is up to the user to delete auto Pools created with this
-option.
-""")
+  @doc("Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option.")
  keepAlive?: boolean;

-  @doc("Specification for creating a new Pool.")
+  @doc("The Pool specification for the auto Pool.")
  pool?: PoolSpecification;
}

@doc("Specification for creating a new Pool.")
model PoolSpecification {
-  @doc("""
-The display name need not be unique and can contain any Unicode characters up
-to a maximum length of 1024.
-""")
+  @doc("The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.")
  displayName?: string;

-  @doc("""
-For information about available sizes of virtual machines in Pools, see Choose
-a VM size for Compute Nodes in an Azure Batch Pool
-(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
-""")
+  @doc("The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).")
  vmSize: string;

-  @doc("""
-This property must be specified if the Pool needs to be created with Azure PaaS
-VMs. This property and virtualMachineConfiguration are mutually exclusive and
-one of the properties must be specified. If neither is specified then the Batch
-service returns an error; if you are calling the REST API directly, the HTTP
-status code is 400 (Bad Request). This property cannot be specified if the
-Batch Account was created with its poolAllocationMode property set to
-'UserSubscription'.
-""")
+  @doc("The cloud service configuration for the Pool. This property must be specified if the Pool needs to be created with Azure PaaS VMs. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.")
  cloudServiceConfiguration?: CloudServiceConfiguration;

-  @doc("""
-This property must be specified if the Pool needs to be created with Azure IaaS
-VMs. This property and cloudServiceConfiguration are mutually exclusive and one
-of the properties must be specified. If neither is specified then the Batch
-service returns an error; if you are calling the REST API directly, the HTTP
-status code is 400 (Bad Request).
-""")
+  @doc("The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).")
  virtualMachineConfiguration?: VirtualMachineConfiguration;

-  @doc("""
-The default value is 1. The maximum value is the smaller of 4 times the number
-of cores of the vmSize of the pool or 256.
-""")
+  @doc("The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.")
  taskSlotsPerNode?: int32;

-  @doc("If not specified, the default is spread.")
+  @doc("How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.")
  taskSchedulingPolicy?: TaskSchedulingPolicy;

-  @doc("""
-This timeout applies only to manual scaling; it has no effect when
-enableAutoScale is set to true. The default value is 15 minutes. The minimum
-value is 5 minutes. If you specify a value less than 5 minutes, the Batch
-service rejects the request with an error; if you are calling the REST API
-directly, the HTTP status code is 400 (Bad Request).
-""")
+  @doc("The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).")
  resizeTimeout?: duration;

-  @doc("""
-This property must not be specified if enableAutoScale is set to true. If
-enableAutoScale is set to false, then you must set either targetDedicatedNodes,
-targetLowPriorityNodes, or both.
-""")
+  @doc("The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.")
  targetDedicatedNodes?: int32;

-  @doc("""
-This property must not be specified if enableAutoScale is set to true. If
-enableAutoScale is set to false, then you must set either targetDedicatedNodes,
-targetLowPriorityNodes, or both.
-""")
+  @doc("The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.")
  targetLowPriorityNodes?: int32;

-  @doc("""
-If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must
-be specified. If true, the autoScaleFormula element is required. The Pool
-automatically resizes according to the formula. The default value is false.
-""")
+  @doc("Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false.")
  enableAutoScale?: boolean;

-  @doc("""
-This property must not be specified if enableAutoScale is set to false. It is
-required if enableAutoScale is set to true. The formula is checked for validity
-before the Pool is created. If the formula is not valid, the Batch service
-rejects the request with detailed error information.
-""")
+  @doc("The formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information.")
  autoScaleFormula?: string;

-  @doc("""
-The default value is 15 minutes. The minimum and maximum value are 5 minutes
-and 168 hours respectively. If you specify a value less than 5 minutes or
-greater than 168 hours, the Batch service rejects the request with an invalid
-property value error; if you are calling the REST API directly, the HTTP status
-code is 400 (Bad Request).
-""")
+  @doc("The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum values are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).")
  autoScaleEvaluationInterval?: duration;

-  @doc("""
-Enabling inter-node communication limits the maximum size of the Pool due to
-deployment restrictions on the Compute Nodes of the Pool. This may result in
-the Pool not reaching its desired size. The default value is false.
-""")
+  @doc("Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.")
  enableInterNodeCommunication?: boolean;

-  @doc("The network configuration for a Pool.")
+  @doc("The network configuration for the Pool.")
  networkConfiguration?: NetworkConfiguration;

-  @doc("""
-Batch will retry Tasks when a recovery operation is triggered on a Node.
-Examples of recovery operations include (but are not limited to) when an
-unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
-Retries due to recovery operations are independent of and are not counted
-against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
-retry due to a recovery operation may occur. Because of this, all Tasks should
-be idempotent. This means Tasks need to tolerate being interrupted and
-restarted without causing any corruption or duplicate data. The best practice
-for long running Tasks is to use some form of checkpointing. In some cases the
-StartTask may be re-run even though the Compute Node was not rebooted. Special
-care should be taken to avoid StartTasks which create breakaway process or
-install/launch services from the StartTask working directory, as this will
-block Batch from being able to re-run the StartTask.
-""")
+  @doc("A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted.")
  startTask?: StartTask;

  @doc("""
-For Windows Nodes, the Batch service installs the Certificates to the specified
-Certificate store and location. For Linux Compute Nodes, the Certificates are
-stored in a directory inside the Task working directory and an environment
-variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this
-location. For Certificates with visibility of 'remoteUser', a 'certs' directory
-is created in the user's home directory (e.g., /home/{user-name}/certs) and
-Certificates are placed in that directory.
+For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+Warning: This property is deprecated and will be removed after February, 2024.
+Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
""")
  certificateReferences?: CertificateReference[];

-  @doc("""
-When creating a pool, the package's application ID must be fully qualified
-(/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}).
-Changes to Package references affect all new Nodes joining the Pool, but do not
-affect Compute Nodes that are already in the Pool until they are rebooted or
-reimaged. There is a maximum of 10 Package references on any given Pool.
-""")
+  @doc("The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool.")
  applicationPackageReferences?: ApplicationPackageReference[];

-  @doc("""
-The list of application licenses must be a subset of available Batch service
-application licenses. If a license is requested which is not supported, Pool
-creation will fail. The permitted licenses available on the Pool are 'maya',
-'vray', '3dsmax', 'arnold'. An additional charge applies for each application
-license added to the Pool.
-""")
+  @doc("The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. The permitted licenses available on the Pool are 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each application license added to the Pool.")
  applicationLicenses?: string[];

  @doc("The list of user Accounts to be created on each Compute Node in the Pool.")
  userAccounts?: UserAccount[];

-  @doc("""
-The Batch service does not assign any meaning to metadata; it is solely for the
-use of user code.
-""")
+  @doc("A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.")
  metadata?: MetadataItem[];

-  @doc("This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.")
+  @doc("A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.")
  mountConfiguration?: MountConfiguration[];

-  @doc("If omitted, the default value is Default.")
+  @doc("The desired node communication mode for the pool. If omitted, the default value is Default.")
  targetNodeCommunicationMode?: NodeCommunicationMode;
}
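The two sizing modes the PoolSpecification docs above describe, sketched in TypeScript; the VM size and node counts are invented, and the trivial formula merely pins the pool at two dedicated nodes:

// Fixed size: enableAutoScale is false (the default), so at least one of the
// target* properties must be set.
const fixedPool = {
  vmSize: "standard_d2s_v3",
  enableAutoScale: false,
  targetDedicatedNodes: 3,
  targetLowPriorityNodes: 0
};

// Autoscaled: autoScaleFormula is required and the target* properties must be
// omitted; the evaluation interval must lie between 5 minutes and 168 hours.
const autoScaledPool = {
  vmSize: "standard_d2s_v3",
  enableAutoScale: true,
  autoScaleFormula: "$TargetDedicatedNodes = 2;",
  autoScaleEvaluationInterval: "PT15M" // ISO 8601 duration on the wire
};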
@@ -2262,54 +1707,27 @@ information, see Azure Guest OS Releases
""")
  osFamily: string;

-  @doc("""
-The default value is * which specifies the latest operating system version for
-the specified OS family.
-""")
+  @doc("The Azure Guest OS version to be installed on the virtual machines in the Pool. The default value is * which specifies the latest operating system version for the specified OS family.")
  osVersion?: string;
}

+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("""
The configuration for Compute Nodes in a Pool based on the Azure Virtual
Machines infrastructure.
""")
model VirtualMachineConfiguration {
-  @doc("""
-A reference to an Azure Virtual Machines Marketplace Image or a Shared Image
-Gallery Image. To get the list of all Azure Marketplace Image references
-verified by Azure Batch, see the 'List Supported Images' operation.
-""")
+  @doc("A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use.")
  imageReference: ImageReference;

-  @doc("""
-The Batch Compute Node agent is a program that runs on each Compute Node in the
-Pool, and provides the command-and-control interface between the Compute Node
-and the Batch service. There are different implementations of the Compute Node
-agent, known as SKUs, for different operating systems. You must specify a
-Compute Node agent SKU which matches the selected Image reference. To get the
-list of supported Compute Node agent SKUs along with their list of verified
-Image references, see the 'List supported Compute Node agent SKUs' operation.
-""")
+  @doc("The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation.")
+  @projectedName("client", "nodeAgentSkuId")
  nodeAgentSKUId: string;

-  @doc("""
-This property must not be specified if the imageReference property specifies a
-Linux OS Image.
-""")
+  @doc("Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image.")
  windowsConfiguration?: WindowsConfiguration;

-  @doc("""
-This property must be specified if the Compute Nodes in the Pool need to have
-empty data disks attached to them. This cannot be updated. Each Compute Node
-gets its own disk (the disk is not a file share). Existing disks cannot be
-attached, each attached disk is empty. When the Compute Node is removed from
-the Pool, the disk and all data associated with it is also deleted. The disk is
-not formatted after being attached, it must be formatted before use - for more
-information see
-https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux
-and
-https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine.
-""")
+  @doc("The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine.")
  dataDisks?: DataDisk[];

  @doc("""
@@ -2325,66 +1743,46 @@ Server.
""")
  licenseType?: string;

-  @doc("""
-If specified, setup is performed on each Compute Node in the Pool to allow
-Tasks to run in containers. All regular Tasks and Job manager Tasks run on this
-Pool must specify the containerSettings property, and all other Tasks may
-specify it.
-""")
+  @doc("The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it.")
  containerConfiguration?: ContainerConfiguration;

-  @doc("""
-If specified, encryption is performed on each node in the pool during node
-provisioning.
-""")
+  @doc("The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning.")
  diskEncryptionConfiguration?: DiskEncryptionConfiguration;

-  @doc("""
-This configuration will specify rules on how nodes in the pool will be
-physically allocated.
-""")
+  @doc("The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated.")
  nodePlacementConfiguration?: NodePlacementConfiguration;

-  @doc("""
-If specified, the extensions mentioned in this configuration will be installed
-on each node.
-""")
+  @doc("The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node.")
  extensions?: VMExtension[];

-  @doc("Settings for the operating system disk of the compute node (VM).")
+  @doc("Settings for the operating system disk of the Virtual Machine.")
  osDisk?: OSDisk;
}

@doc("Windows operating system settings to apply to the virtual machine.")
model WindowsConfiguration {
-  @doc("If omitted, the default value is true.")
+  @doc("Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true.")
  enableAutomaticUpdates?: boolean;
}

+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("""
Settings which will be used by the data disks associated to Compute Nodes in
the Pool. When using attached data disks, you need to mount and format the
disks from within a VM to use them.
""")
model DataDisk {
-  @doc("""
-The lun is used to uniquely identify each data disk. If attaching multiple
-disks, each should have a distinct lun. The value must be between 0 and 63,
-inclusive.
-""")
+  @doc("The logical unit number. The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive.")
  lun: int32;

-  @doc("""
-The default value for caching is readwrite. For information about the caching
-options see:
-https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.
-""")
+  @doc("The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.")
  caching?: CachingType;

  @doc("The initial disk size in gigabytes.")
+  @projectedName("client", "diskSizeGb")
  diskSizeGB: int32;

-  @doc("If omitted, the default is \"standard_lrs\".")
+  @doc("The storage Account type to be used for the data disk. If omitted, the default is \"standard_lrs\".")
  storageAccountType?: StorageAccountType;
}

@@ -2393,17 +1791,10 @@ model ContainerConfiguration {
  @doc("The container technology to be used.")
  type: ContainerType;

-  @doc("""
-This is the full Image reference, as would be specified to \"docker pull\". An
-Image will be sourced from the default Docker registry unless the Image is
-fully qualified with an alternative registry.
-""")
+  @doc("The collection of container Image names. This is the full Image reference, as would be specified to \"docker pull\". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry.")
  containerImageNames?: string[];

-  @doc("""
-If any Images must be downloaded from a private registry which requires
-credentials, then those credentials must be provided here.
-""")
+  @doc("Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here.")
  containerRegistries?: ContainerRegistry[];
}
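A hypothetical value matching the ContainerConfiguration model above. The registry name and credentials are invented, and the fields of the ContainerRegistry entries are assumed from the service API rather than shown in this excerpt:

const containerConfiguration = {
  type: "dockerCompatible",
  containerImageNames: [
    "ubuntu",                          // pulled from the default Docker registry
    "myregistry.azurecr.io/worker:1.2" // fully qualified alternative registry
  ],
  containerRegistries: [
    { registryServer: "myregistry.azurecr.io", username: "svc-user", password: "<secret>" }
  ]
};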
@@ -2413,11 +1804,7 @@ encryption configuration is not supported on Linux pool created with Shared
Image Gallery Image.
""")
model DiskEncryptionConfiguration {
-  @doc("""
-If omitted, no disks on the compute nodes in the pool will be encrypted. On
-Linux pool, only \"TemporaryDisk\" is supported; on Windows pool, \"OsDisk\"
-and \"TemporaryDisk\" must be specified.
-""")
+  @doc("The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only \"TemporaryDisk\" is supported; on Windows pool, \"OsDisk\" and \"TemporaryDisk\" must be specified.")
  targets?: DiskEncryptionTarget[];
}

@@ -2427,13 +1814,11 @@ For zonal placement, nodes in the pool will be spread across different zones
with best effort balancing.
""")
model NodePlacementConfiguration {
-  @doc("""
-Allocation policy used by Batch Service to provision the nodes. If not
-specified, Batch will use the regional policy.
-""")
+  @doc("Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy.")
  policy?: NodePlacementPolicyType;
}

+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("The configuration for virtual machine extensions.")
model VMExtension {
  @doc("The name of the virtual machine extension.")
@@ -2448,35 +1833,26 @@ model VMExtension {
  @doc("The version of script handler.")
  typeHandlerVersion?: string;

-  @doc("""
-Indicates whether the extension should use a newer minor version if one is
-available at deployment time. Once deployed, however, the extension will not
-upgrade minor versions unless redeployed, even with this property set to true.
-""")
+  @doc("Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.")
  autoUpgradeMinorVersion?: boolean;

+  @doc("Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.")
+  enableAutomaticUpgrade?: boolean;
+
  @doc("JSON formatted public settings for the extension.")
-  settings?: object;
+  settings?: Record<unknown>;

-  @doc("""
-The extension can contain either protectedSettings or
-protectedSettingsFromKeyVault or no protected settings at all.
-""")
-  protectedSettings?: object;
+  @doc("The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.")
+  protectedSettings?: Record<unknown>;

-  @doc("""
-Collection of extension names after which this extension needs to be
-provisioned.
-""")
+  @doc("The collection of extension names. Collection of extension names after which this extension needs to be provisioned.")
  provisionAfterExtensions?: string[];
}

+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("Settings for the operating system disk of the compute node (VM).")
model OSDisk {
-  @doc("""
-Specifies the ephemeral Disk Settings for the operating system disk used by the
-compute node (VM).
-""")
+  @doc("Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM).")
  ephemeralOSDiskSettings?: DiffDiskSettings;
}

@@ -2485,174 +1861,93 @@ Specifies the ephemeral Disk Settings for the operating system disk used by the
compute node (VM).
""")
model DiffDiskSettings {
-  @doc("""
-This property can be used by user in the request to choose the location e.g.,
-cache disk space for Ephemeral OS disk provisioning. For more information on
-Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size
-requirements for Windows VMs at
-https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements
-and Linux VMs at
-https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements.
-""")
+  @doc("Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements.")
  placement?: DiffDiskPlacement;
}

@doc("Specifies how Tasks should be distributed across Compute Nodes.")
model TaskSchedulingPolicy {
-  @doc("If not specified, the default is spread.")
-  nodeFillType: ComputeNodeFillType;
+  @doc("How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.")
+  nodeFillType: BatchNodeFillType;
}

+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("The network configuration for a Pool.")
model NetworkConfiguration {
-  @doc("""
-The virtual network must be in the same region and subscription as the Azure
-Batch Account. The specified subnet should have enough free IP addresses to
-accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have
-enough free IP addresses, the Pool will partially allocate Nodes and a resize
-error will occur. The 'MicrosoftAzureBatch' service principal must have the
-'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for
-the specified VNet. The specified subnet must allow communication from the
-Azure Batch service to be able to schedule Tasks on the Nodes. This can be
-verified by checking if the specified VNet has any associated Network Security
-Groups (NSG). If communication to the Nodes in the specified subnet is denied
-by an NSG, then the Batch service will set the state of the Compute Nodes to
-unusable. For Pools created with virtualMachineConfiguration only ARM virtual
-networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools
-created with cloudServiceConfiguration both ARM and classic virtual networks
-are supported. If the specified VNet has any associated Network Security Groups
-(NSG), then a few reserved system ports must be enabled for inbound
-communication. For Pools created with a virtual machine configuration, enable
-ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows.
-For Pools created with a cloud service configuration, enable ports 10100,
-20100, and 30100. Also enable outbound connections to Azure Storage on port
-443. For more details see:
-https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
-""")
+  @doc("The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For Pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.")
  subnetId?: string;

  @doc("The scope of dynamic vnet assignment.")
  dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope;

-  @doc("""
-Pool endpoint configuration is only supported on Pools with the
-virtualMachineConfiguration property.
-""")
+  @doc("The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property.")
  endpointConfiguration?: PoolEndpointConfiguration;

-  @doc("""
-Public IP configuration property is only supported on Pools with the
-virtualMachineConfiguration property.
-""")
+  @doc("The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property.")
+  @projectedName("client", "publicIpAddressConfiguration")
  publicIPAddressConfiguration?: PublicIPAddressConfiguration;
+
+  @doc("Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview.")
+  enableAcceleratedNetworking?: boolean;
}
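A sketch of the subnetId form that the new single-line doc spells out, using the same placeholder segments as the doc itself; the accelerated-networking flag is simply the new optional property:

const networkConfiguration = {
  subnetId:
    "/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/virtualNetworks/{network}/subnets/{subnet}",
  enableAcceleratedNetworking: true // needs a VM size with SR-IOV support
};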
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("The endpoint configuration for a Pool.")
model PoolEndpointConfiguration {
-  @doc("""
-The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum
-number of inbound NAT Pools is exceeded the request fails with HTTP status code
-400. This cannot be specified if the IPAddressProvisioningType is
-NoPublicIPAddresses.
-""")
+  @doc("A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses.")
+  @projectedName("client", "inboundNatPools")
  inboundNATPools: InboundNATPool[];
}

+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("""
An inbound NAT Pool that can be used to address specific ports on Compute Nodes
in a Batch Pool externally.
""")
model InboundNATPool {
-  @doc("""
-The name must be unique within a Batch Pool, can contain letters, numbers,
-underscores, periods, and hyphens. Names must start with a letter or number,
-must end with a letter, number, or underscore, and cannot exceed 77 characters.
- If any invalid values are provided the request fails with HTTP status code
-400.
-""")
+  @doc("The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400.")
  name: string;

  @doc("The protocol of the endpoint.")
  protocol: InboundEndpointProtocol;

-  @doc("""
-This must be unique within a Batch Pool. Acceptable values are between 1 and
-65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any
-reserved values are provided the request fails with HTTP status code 400.
-""")
+  @doc("The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400.")
  backendPort: int32;

-  @doc("""
-Acceptable values range between 1 and 65534 except ports from 50000 to 55000
-which are reserved. All ranges within a Pool must be distinct and cannot
-overlap. Each range must contain at least 40 ports. If any reserved or
-overlapping values are provided the request fails with HTTP status code 400.
-""")
+  @doc("The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400.")
  frontendPortRangeStart: int32;

-  @doc("""
-Acceptable values range between 1 and 65534 except ports from 50000 to 55000
-which are reserved by the Batch service. All ranges within a Pool must be
-distinct and cannot overlap. Each range must contain at least 40 ports. If any
-reserved or overlapping values are provided the request fails with HTTP status
-code 400.
-""")
+  @doc("The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400.")
  frontendPortRangeEnd: int32;

-  @doc("""
-The maximum number of rules that can be specified across all the endpoints on a
-Batch Pool is 25. If no network security group rules are specified, a default
-rule will be created to allow inbound access to the specified backendPort. If
-the maximum number of network security group rules is exceeded the request
-fails with HTTP status code 400.
-""")
+  @doc("A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400.")
  networkSecurityGroupRules?: NetworkSecurityGroupRule[];
}
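An illustrative InboundNATPool value honouring the constraints above; the name and ports are invented. The backend port avoids the reserved 22, 3389, 29876 and 29877, and the frontend range stays outside 50000-55000 while spanning more than the 40-port minimum:

const natPool = {
  name: "app-endpoint",
  protocol: "tcp",
  backendPort: 8080,
  frontendPortRangeStart: 15000,
  frontendPortRangeEnd: 15099, // 100 ports; ranges must not overlap
  networkSecurityGroupRules: [
    { priority: 150, access: "allow", sourceAddressPrefix: "10.0.0.0/24" }
  ]
};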
@doc("A network security group rule to apply to an inbound endpoint.")
model NetworkSecurityGroupRule {
-  @doc("""
-Priorities within a Pool must be unique and are evaluated in order of priority.
-The lower the number the higher the priority. For example, rules could be
-specified with order numbers of 150, 250, and 350. The rule with the order
-number of 150 takes precedence over the rule that has an order of 250. Allowed
-priorities are 150 to 4096. If any reserved or duplicate values are provided
-the request fails with HTTP status code 400.
-""")
+  @doc("The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400.")
  priority: int32;

  @doc("The action that should be taken for a specified IP address, subnet range or tag.")
  access: NetworkSecurityGroupRuleAccess;

-  @doc("""
-Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e.
-192.168.1.0/24), default tag, or * (for all addresses). If any other values
-are provided the request fails with HTTP status code 400.
-""")
+  @doc("The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400.")
  sourceAddressPrefix: string;

-  @doc("""
-Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a
-port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each
-entry in this collection must not overlap any other entry (either a range or an
-individual port). If any other values are provided the request fails with HTTP
-status code 400. The default value is '*'.
-""")
+  @doc("The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'.")
  sourcePortRanges?: string[];
}

+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
@doc("The public IP Address configuration of the networking configuration of a Pool.")
+@projectedName("client", "PublicIpAddressConfiguration")
model PublicIPAddressConfiguration {
-  @doc("The default value is BatchManaged.")
+  @doc("The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged.")
+  @projectedName("client", "IpAddressProvisioningType")
  provision?: IPAddressProvisioningType;

-  @doc("""
-The number of IPs specified here limits the maximum size of the Pool - 100
-dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public
-IP. For example, a pool needing 250 dedicated VMs would need at least 3 public
-IPs specified. Each element of this collection is of the form:
-/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.
-""")
+  @doc("The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.")
  ipAddressIds?: string[];
}

@@ -2672,65 +1967,29 @@ install/launch services from the StartTask working directory, as this will
block Batch from being able to re-run the StartTask.
""")
model StartTask {
-  @doc("""
-The command line does not run under a shell, and therefore cannot take
-advantage of shell features such as environment variable expansion. If you want
-to take advantage of such features, you should invoke the shell in the command
-line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c
-MyCommand\" in Linux. If the command line refers to file paths, it should use a
-relative path (relative to the Task working directory), or use the Batch
-provided environment variable
-(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
-""")
+  @doc("The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).")
  commandLine: string;

-  @doc("""
-When this is specified, all directories recursively below the
-AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are
-mapped into the container, all Task environment variables are mapped into the
-container, and the Task command line is executed in the container. Files
-produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be
-reflected to the host disk, meaning that Batch file APIs will not be able to
-access those files.
-""")
+  @doc("The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.")
  containerSettings?: TaskContainerSettings;

-  @doc("Files listed under this element are located in the Task's working directory.")
+  @doc("A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory.")
  resourceFiles?: ResourceFile[];

  @doc("A list of environment variable settings for the StartTask.")
  environmentSettings?: EnvironmentSetting[];

-  @doc("If omitted, the Task runs as a non-administrative user unique to the Task.")
+  @doc("The user identity under which the StartTask runs. If omitted, the Task runs as a non-administrative user unique to the Task.")
  userIdentity?: UserIdentity;

-  @doc("""
-The Batch service retries a Task if its exit code is nonzero. Note that this
-value specifically controls the number of retries. The Batch service will try
-the Task once, and may then retry up to this limit. For example, if the maximum
-retry count is 3, Batch tries the Task up to 4 times (one initial try and 3
-retries). If the maximum retry count is 0, the Batch service does not retry the
-Task. If the maximum retry count is -1, the Batch service retries the Task
-without limit, however this is not recommended for a start task or any task.
-The default value is 0 (no retries)
-""")
+  @doc("The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries).")
  maxTaskRetryCount?: int32;

-  @doc("""
-If true and the StartTask fails on a Node, the Batch service retries the
-StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has
-still not completed successfully after all retries, then the Batch service
-marks the Node unusable, and will not schedule Tasks to it. This condition can
-be detected via the Compute Node state and failure info details. If false, the
-Batch service will not wait for the StartTask to complete. In this case, other
-Tasks can start executing on the Compute Node while the StartTask is still
-running; and even if the StartTask fails, new Tasks will continue to be
-scheduled on the Compute Node. The default is true.
-""")
+  @doc("Whether the Batch service should wait for the StartTask to complete successfully (that is, to exit with exit code 0) before scheduling any Tasks on the Compute Node. If true and the StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks to it. This condition can be detected via the Compute Node state and failure info details. If false, the Batch service will not wait for the StartTask to complete. In this case, other Tasks can start executing on the Compute Node while the StartTask is still running; and even if the StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is true.")
  waitForSuccess?: boolean;
}
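The retry arithmetic from the StartTask doc above, restated as a sketch; the command and values are invented:

const startTask = {
  commandLine: "/bin/sh -c ./setup.sh", // invoke a shell explicitly to get shell features
  maxTaskRetryCount: 3, // 1 initial try + 3 retries = up to 4 attempts
  waitForSuccess: true  // a Node that exhausts all attempts is marked unusable
};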
For Linux Compute Nodes, the Certificates are stored in a -directory inside the Task working directory and an environment variable -AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. -For Certificates with visibility of 'remoteUser', a 'certs' directory is -created in the user's home directory (e.g., /home/{user-name}/certs) and -Certificates are placed in that directory. -""") + @doc("The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.") storeLocation?: CertificateStoreLocation; - @doc(""" -This property is applicable only for Pools configured with Windows Compute -Nodes (that is, created with cloudServiceConfiguration, or with -virtualMachineConfiguration using a Windows Image reference). Common store -names include: My, Root, CA, Trust, Disallowed, TrustedPeople, -TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be -used. The default value is My. -""") + @doc("The name of the Certificate store on the Compute Node into which to install the Certificate. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My.") storeName?: string; - @doc(""" -You can specify more than one visibility in this collection. The default is all -Accounts. -""") + @doc("Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts.") visibility?: CertificateVisibility[]; } @@ -2773,63 +2012,37 @@ Properties used to create a user used to execute Tasks on an Azure Batch Compute Node. """) model UserAccount { - @doc(""" -The name of the user Account. Names can contain any Unicode characters up to a -maximum length of 20. -""") + @doc("The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20.") name: string; @doc("The password for the user Account.") password: string; - @doc("The default value is nonAdmin.") + @doc("The elevation level of the user Account. The default value is nonAdmin.") elevationLevel?: ElevationLevel; - @doc(""" -This property is ignored if specified on a Windows Pool. If not specified, the -user is created with the default options. -""") + @doc("The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options.") linuxUserConfiguration?: LinuxUserConfiguration; - @doc(""" -This property can only be specified if the user is on a Windows Pool. 
If not
-specified and on a Windows Pool, the user is created with the default options.
-""")
+  @doc("The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options.")
   windowsUserConfiguration?: WindowsUserConfiguration;
 }
 
 @doc("Properties used to create a user Account on a Linux Compute Node.")
 model LinuxUserConfiguration {
-  @doc("""
-The uid and gid properties must be specified together or not at all. If not
-specified the underlying operating system picks the uid.
-""")
+  @doc("The user ID of the user Account. The uid and gid properties must be specified together or not at all. If not specified, the underlying operating system picks the uid.")
   uid?: int32;
 
-  @doc("""
-The uid and gid properties must be specified together or not at all. If not
-specified the underlying operating system picks the gid.
-""")
+  @doc("The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified, the underlying operating system picks the gid.")
   gid?: int32;
 
-  @doc("""
-The private key must not be password protected. The private key is used to
-automatically configure asymmetric-key based authentication for SSH between
-Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication
-property is true (it is ignored if enableInterNodeCommunication is false). It
-does this by placing the key pair into the user's .ssh directory. If not
-specified, password-less SSH is not configured between Compute Nodes (no
-modification of the user's .ssh directory is done).
-""")
+  @doc("The SSH private key for the user Account. The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done).")
   sshPrivateKey?: string;
 }
 
 @doc("Properties used to create a user Account on a Windows Compute Node.")
 model WindowsUserConfiguration {
-  @doc("""
-The default value for VirtualMachineConfiguration Pools is 'batch' and for
-CloudServiceConfiguration Pools is 'interactive'.
-""")
+  @doc("The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch' and for CloudServiceConfiguration Pools is 'interactive'.")
   loginMode?: LoginMode;
 }
 
@@ -2847,16 +2060,16 @@ model MetadataItem {
 @doc("The file system to mount on each node.")
 model MountConfiguration {
-  @doc("This property is mutually exclusive with all other properties.")
+  @doc("The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties.")
   azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration;
 
-  @doc("This property is mutually exclusive with all other properties.")
+  @doc("The NFS file system to mount on each node. This property is mutually exclusive with all other properties.")
   nfsMountConfiguration?: NFSMountConfiguration;
 
-  @doc("This property is mutually exclusive with all other properties.")
+  @doc("The CIFS/SMB file system to mount on each node. 
This property is mutually exclusive with all other properties.") cifsMountConfiguration?: CifsMountConfiguration; - @doc("This property is mutually exclusive with all other properties.") + @doc("The Azure File Share to mount on each node. This property is mutually exclusive with all other properties.") azureFileShareConfiguration?: AzureFileShareConfiguration; } @@ -2868,46 +2081,33 @@ model AzureBlobFileSystemConfiguration { @doc("The Azure Blob Storage Container name.") containerName: string; - @doc(""" -This property is mutually exclusive with both sasKey and identity; exactly one -must be specified. -""") + @doc("The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified.") accountKey?: string; - @doc(""" -This property is mutually exclusive with both accountKey and identity; exactly -one must be specified. -""") + @doc("The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified.") sasKey?: string; - @doc("These are 'net use' options in Windows and 'mount' options in Linux.") + @doc("Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux.") blobfuseOptions?: string; - @doc(""" -All file systems are mounted relative to the Batch mounts directory, accessible -via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. -""") + @doc("The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.") relativeMountPath: string; - @doc(""" -This property is mutually exclusive with both accountKey and sasKey; exactly -one must be specified. -""") - identityReference?: ComputeNodeIdentityReference; + @doc("The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified.") + identityReference?: BatchNodeIdentityReference; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("Information used to connect to an NFS file system.") +@projectedName("client", "NfsMountConfiguration") model NFSMountConfiguration { @doc("The URI of the file system to mount.") source: string; - @doc(""" -All file systems are mounted relative to the Batch mounts directory, accessible -via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. -""") + @doc("The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.") relativeMountPath: string; - @doc("These are 'net use' options in Windows and 'mount' options in Linux.") + @doc("Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux.") mountOptions?: string; } @@ -2919,13 +2119,10 @@ model CifsMountConfiguration { @doc("The URI of the file system to mount.") source: string; - @doc(""" -All file systems are mounted relative to the Batch mounts directory, accessible -via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. -""") + @doc("The relative path on the compute node where the file system will be mounted. 
All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.")
   relativeMountPath: string;
 
-  @doc("These are 'net use' options in Windows and 'mount' options in Linux.")
+  @doc("Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux.")
   mountOptions?: string;
 
   @doc("The password to use for authentication against the CIFS file system.")
@@ -2937,19 +2134,16 @@ model AzureFileShareConfiguration {
   @doc("The Azure Storage account name.")
   accountName: string;
 
-  @doc("This is of the form 'https://{account}.file.core.windows.net/'.")
+  @doc("The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'.")
   azureFileUrl: string;
 
   @doc("The Azure Storage account key.")
   accountKey: string;
 
-  @doc("""
-All file systems are mounted relative to the Batch mounts directory, accessible
-via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
-""")
+  @doc("The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.")
   relativeMountPath: string;
 
-  @doc("These are 'net use' options in Windows and 'mount' options in Linux.")
+  @doc("Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux.")
   mountOptions?: string;
 }
 
@@ -2958,20 +2152,13 @@
 Contains information about Jobs that have been and will be run under a Job
 Schedule.
 """)
 model JobScheduleExecutionInformation {
-  @doc("""
-This property is meaningful only if the schedule is in the active state when
-the time comes around. For example, if the schedule is disabled, no Job will be
-created at nextRunTime unless the Job is enabled before then.
-""")
+  @doc("The next time at which a Job will be created under this schedule. This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before then.")
   nextRunTime?: utcDateTime;
 
-  @doc("""
-This property is present only if the at least one Job has run under the
-schedule.
-""")
+  @doc("Information about the most recent Job under the Job Schedule. This property is present only if at least one Job has run under the schedule.")
   recentJob?: RecentJob;
 
-  @doc("This property is set only if the Job Schedule is in the completed state.")
+  @doc("The time at which the schedule ended. This property is set only if the Job Schedule is in the completed state.")
   endTime?: utcDateTime;
 }
 
@@ -2984,6 +2171,7 @@ model RecentJob {
   url?: string;
 }
 
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
 @doc("Resource usage statistics for a Job Schedule.")
 model JobScheduleStatistics {
   @doc("The URL of the statistics.")
@@ -2992,84 +2180,43 @@ model JobScheduleStatistics {
   @doc("The start time of the time range covered by the statistics.")
   startTime: utcDateTime;
 
-  @doc("""
-The time at which the statistics were last updated. All statistics are limited
-to the range between startTime and lastUpdateTime.
-""")
+  @doc("The time at which the statistics were last updated. 
All statistics are limited to the range between startTime and lastUpdateTime.") lastUpdateTime: utcDateTime; - @doc(""" -The total user mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in all Jobs created under the schedule. -""") + @doc("The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule.") userCPUTime: duration; - @doc(""" -The total kernel mode CPU time (summed across all cores and all Compute Nodes) -consumed by all Tasks in all Jobs created under the schedule. -""") + @doc("The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule.") kernelCPUTime: duration; - @doc(""" -The wall clock time is the elapsed time from when the Task started running on a -Compute Node to when it finished (or to the last time the statistics were -updated, if the Task had not finished by then). If a Task was retried, this -includes the wall clock time of all the Task retries. -""") + @doc("The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries.") wallClockTime: duration; - @doc(""" -The total number of disk read operations made by all Tasks in all Jobs created -under the schedule. -""") + @doc("The total number of disk read operations made by all Tasks in all Jobs created under the schedule.") readIOps: int32; - @doc(""" -The total number of disk write operations made by all Tasks in all Jobs created -under the schedule. -""") + @doc("The total number of disk write operations made by all Tasks in all Jobs created under the schedule.") writeIOps: int32; - @doc(""" -The total gibibytes read from disk by all Tasks in all Jobs created under the -schedule. -""") + @doc("The total gibibytes read from disk by all Tasks in all Jobs created under the schedule.") readIOGiB: float32; - @doc(""" -The total gibibytes written to disk by all Tasks in all Jobs created under the -schedule. -""") + @doc("The total gibibytes written to disk by all Tasks in all Jobs created under the schedule.") writeIOGiB: float32; - @doc(""" -The total number of Tasks successfully completed during the given time range in -Jobs created under the schedule. A Task completes successfully if it returns -exit code 0. -""") + @doc("The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0.") numSucceededTasks: int32; - @doc(""" -The total number of Tasks that failed during the given time range in Jobs -created under the schedule. A Task fails if it exhausts its maximum retry count -without returning exit code 0. -""") + @doc("The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0.") numFailedTasks: int32; - @doc(""" -The total number of retries during the given time range on all Tasks in all -Jobs created under the schedule. 
-
""")
+  @doc("The total number of retries during the given time range on all Tasks in all Jobs created under the schedule.")
   numTaskRetries: int32;
 
-  @doc("""
-This value is only reported in the Account lifetime statistics; it is not
-included in the Job statistics.
-""")
+  @doc("The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics.")
   waitTime: duration;
 }
 
-
 @doc("The result of listing the Job Schedules in an Account.")
 @pagedResult
 model BatchJobScheduleListResult {
@@ -3077,49 +2224,35 @@ model BatchJobScheduleListResult {
   @items
   value?: BatchJobSchedule[];
 
+  #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
   @doc("The URL to get the next set of results.")
   @nextLink
-  "odata.nextLink"?: string;
+  `odata.nextLink`?: string;
 }
 
 @doc("An Azure Batch Job.")
 model BatchJob {
-  @doc("""
-The ID is case-preserving and case-insensitive (that is, you may not have two
-IDs within an Account that differ only by case).
-""")
-  @visibility("read","create")
+  @doc("A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).")
+  @visibility("read")
   id?: string;
 
   @doc("The display name for the Job.")
-  @visibility("read","create")
+  @visibility("read")
   displayName?: string;
 
-  @doc("""
-Whether Tasks in the Job can define dependencies on each other. The default is
-false.
-""")
-  @visibility("read","create")
+  @doc("Whether Tasks in the Job can define dependencies on each other. The default is false.")
+  @visibility("read")
   usesTaskDependencies?: boolean;
 
   @doc("The URL of the Job.")
   @visibility("read")
   url?: string;
 
-  @doc("""
-This is an opaque string. You can use it to detect whether the Job has changed
-between requests. In particular, you can be pass the ETag when updating a Job
-to specify that your changes should take effect only if nobody else has
-modified the Job in the meantime.
-""")
+  @doc("The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime.")
   @visibility("read")
   eTag?: string;
 
-  @doc("""
-This is the last time at which the Job level data, such as the Job state or
-priority, changed. It does not factor in task-level changes such as adding new
-Tasks or Tasks changing state.
-""")
+  @doc("The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state.")
   @visibility("read")
   lastModified?: utcDateTime;
 
@@ -3127,7 +2260,7 @@ Tasks or Tasks changing state.
   @visibility("read")
   creationTime?: utcDateTime;
 
-  @doc("The state of the Job.")
+  @doc("The current state of the Job.")
   @visibility("read")
   state?: JobState;
 
@@ -3135,197 +2268,185 @@ Tasks or Tasks changing state. 
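Note on the paged-list pattern above: the list models in this file converge on one shape after this change. A minimal sketch, assuming a hypothetical ExampleListResult model and using only decorators and directives already present in this spec (back-tick escaping preserves the wire name odata.nextLink while the casing-style lint rule is suppressed):

@doc("The result of listing a hypothetical resource in an Account.")
@pagedResult
model ExampleListResult {
  @doc("The list of results.")
  @items
  value?: string[];

  #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
  @doc("The URL to get the next set of results.")
  @nextLink
  `odata.nextLink`?: string;
}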
@visibility("read") stateTransitionTime?: utcDateTime; - @doc("This property is not set if the Job is in its initial Active state.") + @doc("The previous state of the Job. This property is not set if the Job is in its initial Active state.") @visibility("read") previousState?: JobState; - @doc("This property is not set if the Job is in its initial Active state.") + @doc("The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state.") @visibility("read") previousStateTransitionTime?: utcDateTime; - @doc(""" -Priority values can range from -1000 to 1000, with -1000 being the lowest -priority and 1000 being the highest priority. The default value is 0. -""") + @doc("The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.") priority?: int32; - @doc(""" -If the value is set to True, other high priority jobs submitted to the system -will take precedence and will be able requeue tasks from this job. You can -update a job's allowTaskPreemption after it has been created using the update -job API. -""") + @doc("Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API.") allowTaskPreemption?: boolean; - @doc(""" -The value of maxParallelTasks must be -1 or greater than 0 if specified. If not -specified, the default value is -1, which means there's no limit to the number -of tasks that can be run at once. You can update a job's maxParallelTasks after -it has been created using the update job API. -""") + @doc("The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API.") maxParallelTasks?: int32 = -1; - @doc("The execution constraints for a Job.") + @doc("The execution constraints for the Job.") constraints?: JobConstraints; - @doc(""" -The Job Manager Task is automatically started when the Job is created. The -Batch service tries to schedule the Job Manager Task before any other Tasks in -the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where -Job Manager Tasks are running for as long as possible (that is, Compute Nodes -running 'normal' Tasks are removed before Compute Nodes running Job Manager -Tasks). When a Job Manager Task fails and needs to be restarted, the system -tries to schedule it at the highest priority. If there are no idle Compute -Nodes available, the system may terminate one of the running Tasks in the Pool -and return it to the queue in order to make room for the Job Manager Task to -restart. Note that a Job Manager Task in one Job does not have priority over -Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For -example, if a Job Manager in a priority 0 Job needs to be restarted, it will -not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery -operation is triggered on a Node. 
Examples of recovery operations include (but
-are not limited to) when an unhealthy Node is rebooted or a Compute Node
-disappeared due to host failure. Retries due to recovery operations are
-independent of and are not counted against the maxTaskRetryCount. Even if the
-maxTaskRetryCount is 0, an internal retry due to a recovery operation may
-occur. Because of this, all Tasks should be idempotent. This means Tasks need
-to tolerate being interrupted and restarted without causing any corruption or
-duplicate data. The best practice for long running Tasks is to use some form of
-checkpointing.
-""")
-  @visibility("read","create")
+  @doc("Details of a Job Manager Task to be launched when the Job is started.")
+  @visibility("read")
   jobManagerTask?: JobManagerTask;
 
-  @doc("""
-The Job Preparation Task is a special Task run on each Compute Node before any
-other Task of the Job.
-""")
-  @visibility("read","create")
+  @doc("The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job.")
+  @visibility("read")
   jobPreparationTask?: JobPreparationTask;
 
-  @doc("""
-The Job Release Task is a special Task run at the end of the Job on each
-Compute Node that has run any other Task of the Job.
-""")
-  @visibility("read","create")
+  @doc("The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job.")
+  @visibility("read")
   jobReleaseTask?: JobReleaseTask;
 
-  @doc("""
-Individual Tasks can override an environment setting specified here by
-specifying the same setting name with a different value.
-""")
-  @visibility("read","create")
+  @doc("The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.")
+  @visibility("read")
   commonEnvironmentSettings?: EnvironmentSetting[];
 
-  @doc("Specifies how a Job should be assigned to a Pool.")
-  poolInfo?: PoolInformation;
+  @doc("The Pool settings associated with the Job.")
+  poolInfo: PoolInformation;
 
-  @doc("The default is noaction.")
+  @doc("The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction.")
   onAllTasksComplete?: OnAllTasksComplete;
 
-  @doc("""
-A Task is considered to have failed if has a failureInfo. A failureInfo is set
-if the Task completes with a non-zero exit code after exhausting its retry
-count, or if there was an error starting the Task, for example due to a
-resource file download error. The default is noaction.
-""")
-  @visibility("read","create")
+  @doc("The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.")
+  @visibility("read")
   onTaskFailure?: OnTaskFailure;
 
   @doc("The network configuration for the Job.")
-  @visibility("read","create")
+  @visibility("read")
   networkConfiguration?: JobNetworkConfiguration;
 
-  @doc("""
-The Batch service does not assign any meaning to metadata; it is solely for the
-use of user code.
-""")
+  @doc("A list of name-value pairs associated with the Job as metadata. 
The Batch service does not assign any meaning to metadata; it is solely for the use of user code.")
   metadata?: MetadataItem[];
 
-  @doc("Contains information about the execution of a Job in the Azure Batch service.")
+  @doc("The execution information for the Job.")
   @visibility("read")
   executionInfo?: JobExecutionInformation;
 
-  @doc("""
-This property is populated only if the CloudJob was retrieved with an expand
-clause including the 'stats' attribute; otherwise it is null. The statistics
-may not be immediately available. The Batch service performs periodic roll-up
-of statistics. The typical delay is about 30 minutes.
-""")
+  @doc("Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.")
   @visibility("read")
   stats?: JobStatistics;
 }
 
-@doc("Contains information about the execution of a Job in the Azure Batch service.")
-model JobExecutionInformation {
-  @doc("This is the time at which the Job was created.")
-  startTime: utcDateTime;
+@doc("Options for creating an Azure Batch Job.")
+model BatchJobCreateOptions {
+  @doc("A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).")
+  id: string;
+
+  @doc("The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.")
+  displayName?: string;
+
+  @doc("Whether Tasks in the Job can define dependencies on each other. The default is false.")
+  usesTaskDependencies?: boolean;
+
+  @doc("The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.")
+  priority?: int32;
+
+  @doc("Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API.")
+  allowTaskPreemption?: boolean;
+
+  @doc("The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API.")
+  maxParallelTasks?: int32 = -1;
+
+  @doc("The execution constraints for the Job.")
+  constraints?: JobConstraints;
+
+  @doc("Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. 
The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and can perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents.")
+  jobManagerTask?: JobManagerTask;
+
+  @doc("The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node.")
+  jobPreparationTask?: JobPreparationTask;
+
+  @doc("The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation.")
+  jobReleaseTask?: JobReleaseTask;
+
+  @doc("The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.")
+  commonEnvironmentSettings?: EnvironmentSetting[];
+
+  @doc("The Pool on which the Batch service runs the Job's Tasks.")
+  poolInfo: PoolInformation;
+
+  @doc("The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction.")
+  onAllTasksComplete?: OnAllTasksComplete;
+
+  @doc("The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.")
+  onTaskFailure?: OnTaskFailure;
+
+  @doc("The network configuration for the Job.")
+  networkConfiguration?: JobNetworkConfiguration;
+
+  @doc("A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.")
+  metadata?: MetadataItem[];
+}
-
+@doc("Options for updating an Azure Batch Job.")
+model BatchJobUpdateOptions {
+  @doc("The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged.")
+  priority?: int32;
+
+  @doc("Whether Tasks in this job can be preempted by other high priority jobs. 
If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API.")
+  allowTaskPreemption?: boolean;
+
+  @doc("The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API.")
+  maxParallelTasks?: int32;
+
+  @doc("The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged.")
+  constraints?: JobConstraints;
+
+  @doc("The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool.")
+  poolInfo?: PoolInformation;
+
+  @doc("The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).")
+  onAllTasksComplete?: OnAllTasksComplete;
+
+  @doc("A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged.")
+  metadata?: MetadataItem[];
+}
+
+@doc("Contains information about the execution of a Job in the Azure Batch service.")
+model JobExecutionInformation {
+  @doc("The start time of the Job. This is the time at which the Job was created.")
+  startTime: utcDateTime;
+
+  @doc("The completion time of the Job. This property is set only if the Job is in the completed state.")
   endTime?: utcDateTime;
 
-  @doc("""
-This element contains the actual Pool where the Job is assigned. When you get
-Job details from the service, they also contain a poolInfo element, which
-contains the Pool configuration data from when the Job was added or updated.
-That poolInfo element may also contain a poolId element. If it does, the two
-IDs are the same. If it does not, it means the Job ran on an auto Pool, and
-this property contains the ID of that auto Pool.
-""")
+  @doc("The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. 
If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool.") poolId?: string; - @doc("This property is not set if there was no error starting the Job.") + @doc("Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job.") schedulingError?: JobSchedulingError; - @doc(""" -This property is set only if the Job is in the completed state. If the Batch -service terminates the Job, it sets the reason as follows: JMComplete - the Job -Manager Task completed, and killJobOnCompletion was set to true. -MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. -TerminateJobSchedule - the Job ran as part of a schedule, and the schedule -terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to -terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's -onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in -the Job failed with an exit condition that specified a jobAction of -terminatejob. Any other string is a user-defined reason specified in a call to -the 'Terminate a Job' operation. -""") + @doc("A string describing the reason the Job ended. This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation.") terminateReason?: string; } @doc("An error encountered by the Batch service when scheduling a Job.") model JobSchedulingError { - @doc("The category of the error.") + @doc("The category of the Job scheduling error.") category: ErrorCategory; - @doc(""" -An identifier for the Job scheduling error. Codes are invariant and are -intended to be consumed programmatically. -""") + @doc("An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically.") code?: string; - @doc(""" -A message describing the Job scheduling error, intended to be suitable for -display in a user interface. -""") + @doc("A message describing the Job scheduling error, intended to be suitable for display in a user interface.") message?: string; @doc("A list of additional error details related to the scheduling error.") details?: NameValuePair[]; } - -@doc("Options when disabling a Job.") -model BatchJobDisableParameters { +@doc("Options for disabling an Azure Batch Job.") +model BatchJobDisableOptions { @doc("What to do with active Tasks associated with the Job.") disableTasks: DisableJobOption; } -@doc("Options when terminating a Job.") -model BatchJobTerminateParameters { - @doc(""" -The text you want to appear as the Job's TerminateReason. The default is -'UserTerminate'. -""") +@doc("Options for terminating an Azure Batch Job.") +model BatchJobTerminateOptions { + @doc("The text you want to appear as the Job's TerminateReason. 
The default is 'UserTerminate'.") terminateReason?: string; } @@ -3336,9 +2457,10 @@ model BatchJobListResult { @items value?: BatchJob[]; + #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc(""" @@ -3351,9 +2473,10 @@ model BatchJobListPreparationAndReleaseTaskStatusResult { @items value?: JobPreparationAndReleaseTaskExecutionInformation[]; + #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } @doc("The status of the Job Preparation and Job Release Tasks on a Compute Node.") @@ -3367,13 +2490,10 @@ model JobPreparationAndReleaseTaskExecutionInformation { @doc("The URL of the Compute Node to which this entry refers.") nodeUrl?: string; - @doc(""" -Contains information about the execution of a Job Preparation Task on a Compute -Node. -""") + @doc("Information about the execution status of the Job Preparation Task on this Compute Node.") jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformation; - @doc("This property is set only if the Job Release Task has run on the Compute Node.") + @doc("Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node.") jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformation; } @@ -3382,66 +2502,37 @@ Contains information about the execution of a Job Preparation Task on a Compute Node. """) model JobPreparationTaskExecutionInformation { - @doc(""" -If the Task has been restarted or retried, this is the most recent time at -which the Task started running. -""") + @doc("The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running.") startTime: utcDateTime; - @doc("This property is set only if the Task is in the Completed state.") + @doc("The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state.") endTime?: utcDateTime; @doc("The current state of the Job Preparation Task on the Compute Node.") state: JobPreparationTaskState; - @doc(""" -The root directory of the Job Preparation Task on the Compute Node. You can use -this path to retrieve files created by the Task, such as log files. -""") + @doc("The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files.") taskRootDirectory?: string; @doc("The URL to the root directory of the Job Preparation Task on the Compute Node.") taskRootDirectoryUrl?: string; - @doc(""" -This parameter is returned only if the Task is in the completed state. The exit -code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. Note that the exit code may also be generated by the -Compute Node operating system, such as when a process is forcibly terminated. -""") + @doc("The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. 
The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated.")
   exitCode?: int32;
 
-  @doc("This property is set only if the Task runs in a container context.")
+  @doc("Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.")
   containerInfo?: TaskContainerExecutionInformation;
 
-  @doc("""
-This property is set only if the Task is in the completed state and encountered
-a failure.
-""")
+  @doc("Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.")
   failureInfo?: TaskFailureInformation;
 
-  @doc("""
-Task application failures (non-zero exit code) are retried, pre-processing
-errors (the Task could not be run) and file upload errors are not retried. The
-Batch service will retry the Task up to the limit specified by the constraints.
-""")
+  @doc("The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.")
   retryCount: int32;
 
-  @doc("""
-This property is set only if the Task was retried (i.e. retryCount is nonzero).
-If present, this is typically the same as startTime, but may be different if
-the Task has been restarted for reasons other than retry; for example, if the
-Compute Node was rebooted during a retry, then the startTime is updated but the
-lastRetryTime is not.
-""")
+  @doc("The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.")
   lastRetryTime?: utcDateTime;
 
-  @doc("""
-If the value is 'failed', then the details of the failure can be found in the
-failureInfo property.
-""")
+  @doc("The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.")
   result?: TaskExecutionResult;
 }
 
@@ -3450,34 +2541,22 @@ model TaskContainerExecutionInformation {
   @doc("The ID of the container.")
   containerId?: string;
 
-  @doc("""
-This is the state of the container according to the Docker service. It is
-equivalent to the status field returned by \"docker inspect\".
-""")
+  @doc("The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by \"docker inspect\".")
   state?: string;
 
-  @doc("""
-This is the detailed error string from the Docker service, if available. It is
-equivalent to the error field returned by \"docker inspect\". 
-""") + @doc("Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by \"docker inspect\".") error?: string; } @doc("Information about a Task failure.") model TaskFailureInformation { - @doc("The category of the error.") + @doc("The category of the Task error.") category: ErrorCategory; - @doc(""" -An identifier for the Task error. Codes are invariant and are intended to be -consumed programmatically. -""") + @doc("An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically.") code?: string; - @doc(""" -A message describing the Task error, intended to be suitable for display in a -user interface. -""") + @doc("A message describing the Task error, intended to be suitable for display in a user interface.") message?: string; @doc("A list of additional details related to the error.") @@ -3489,62 +2568,40 @@ Contains information about the execution of a Job Release Task on a Compute Node. """) model JobReleaseTaskExecutionInformation { - @doc(""" -If the Task has been restarted or retried, this is the most recent time at -which the Task started running. -""") + @doc("The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running.") startTime: utcDateTime; - @doc("This property is set only if the Task is in the Completed state.") + @doc("The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state.") endTime?: utcDateTime; @doc("The current state of the Job Release Task on the Compute Node.") state: JobReleaseTaskState; - @doc(""" -The root directory of the Job Release Task on the Compute Node. You can use -this path to retrieve files created by the Task, such as log files. -""") + @doc("The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files.") taskRootDirectory?: string; @doc("The URL to the root directory of the Job Release Task on the Compute Node.") taskRootDirectoryUrl?: string; - @doc(""" -This parameter is returned only if the Task is in the completed state. The exit -code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. Note that the exit code may also be generated by the -Compute Node operating system, such as when a process is forcibly terminated. -""") + @doc("The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated.") exitCode?: int32; - @doc("This property is set only if the Task runs in a container context.") + @doc("Information about the container under which the Task is executing. 
This property is set only if the Task runs in a container context.") containerInfo?: TaskContainerExecutionInformation; - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") + @doc("Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.") failureInfo?: TaskFailureInformation; - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") + @doc("The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.") result?: TaskExecutionResult; } @doc("The Task and TaskSlot counts for a Job.") model TaskCountsResult { - @doc("The Task counts for a Job.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key + @doc("The number of Tasks per state.") taskCounts: TaskCounts; - @doc("The TaskSlot counts for a Job.") + @doc("The number of TaskSlots required by Tasks per state.") taskSlotCounts: TaskSlotCounts; } @@ -3559,16 +2616,10 @@ model TaskCounts { @doc("The number of Tasks in the completed state.") completed: int32; - @doc(""" -The number of Tasks which succeeded. A Task succeeds if its result (found in -the executionInfo property) is 'success'. -""") + @doc("The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'.") succeeded: int32; - @doc(""" -The number of Tasks which failed. A Task fails if its result (found in the -executionInfo property) is 'failure'. -""") + @doc("The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'.") failed: int32; } @@ -3592,40 +2643,23 @@ model TaskSlotCounts { @doc("A Pool in the Azure Batch service.") model BatchPool { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores, and cannot contain more than 64 characters. The ID is -case-preserving and case-insensitive (that is, you may not have two IDs within -an Account that differ only by case). -""") - @visibility("read","create") + @doc("A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).") + @visibility("read") id?: string; - @doc(""" -The display name need not be unique and can contain any Unicode characters up -to a maximum length of 1024. -""") - @visibility("read","create") + @doc("The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.") + @visibility("read") displayName?: string; @doc("The URL of the Pool.") @visibility("read") url?: string; - @doc(""" -This is an opaque string. You can use it to detect whether the Pool has changed -between requests. In particular, you can be pass the ETag when updating a Pool -to specify that your changes should take effect only if nobody else has -modified the Pool in the meantime. -""") + @doc("The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. 
In particular, you can pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime.")
   @visibility("read")
   eTag?: string;
 
-  @doc("""
-This is the last time at which the Pool level data, such as the
-targetDedicatedNodes or enableAutoscale settings, changed. It does not factor
-in node-level changes such as a Compute Node changing state.
-""")
+  @doc("The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state.")
   @visibility("read")
   lastModified?: utcDateTime;
 
@@ -3649,41 +2683,23 @@ in node-level changes such as a Compute Node changing state.
   @visibility("read")
   allocationStateTransitionTime?: utcDateTime;
 
-  @doc("""
-For information about available sizes of virtual machines in Pools, see Choose
-a VM size for Compute Nodes in an Azure Batch Pool
-(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
-""")
-  @visibility("read","create")
+  @doc("The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).")
+  @visibility("read")
   vmSize?: string;
 
-  @doc("""
-This property and virtualMachineConfiguration are mutually exclusive and one of
-the properties must be specified. This property cannot be specified if the
-Batch Account was created with its poolAllocationMode property set to
-'UserSubscription'.
-""")
-  @visibility("read","create")
+  @doc("The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.")
+  @visibility("read")
   cloudServiceConfiguration?: CloudServiceConfiguration;
 
-  @doc("""
-This property and cloudServiceConfiguration are mutually exclusive and one of
-the properties must be specified.
-""")
-  @visibility("read","create")
+  @doc("The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified.")
+  @visibility("read")
   virtualMachineConfiguration?: VirtualMachineConfiguration;
 
-  @doc("""
-This is the timeout for the most recent resize operation. (The initial sizing
-when the Pool is created counts as a resize.) The default value is 15 minutes.
-""")
-  @visibility("read","create")
+  @doc("The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes.")
+  @visibility("read")
   resizeTimeout?: duration;
 
-  @doc("""
-This property is set only if one or more errors occurred during the last Pool
-resize, and only when the Pool allocationState is Steady.
-""")
+  @doc("A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady.")
   @visibility("read")
   resizeErrors?: ResizeError[];
 
@@ -3691,164 +2707,233 @@ resize, and only when the Pool allocationState is Steady. 
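Note on the @visibility churn in this hunk: it follows the same split used for BatchJob and BatchJobCreateOptions above. Read-only properties keep @visibility("read") on the resource model, while writable properties move to a dedicated *CreateOptions model. A minimal sketch, assuming a hypothetical ExampleResource:

@doc("A hypothetical resource, as returned by the service.")
model ExampleResource {
  @doc("A string that uniquely identifies the resource within the Account.")
  @visibility("read")
  id?: string;
}

@doc("Options for creating the hypothetical resource.")
model ExampleResourceCreateOptions {
  @doc("A string that uniquely identifies the resource within the Account.")
  id: string;
}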
@visibility("read") currentDedicatedNodes?: int32; - @doc(""" -Spot/Low-priority Compute Nodes which have been preempted are included in this -count. -""") + @doc("The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count.") @visibility("read") currentLowPriorityNodes?: int32; @doc("The desired number of dedicated Compute Nodes in the Pool.") - @visibility("read","create") + @visibility("read") targetDedicatedNodes?: int32; @doc("The desired number of Spot/Low-priority Compute Nodes in the Pool.") - @visibility("read","create") + @visibility("read") targetLowPriorityNodes?: int32; - @doc(""" -If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must -be specified. If true, the autoScaleFormula property is required and the Pool -automatically resizes according to the formula. The default value is false. -""") - @visibility("read","create") + @doc("Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false.") + @visibility("read") enableAutoScale?: boolean; - @doc(""" -This property is set only if the Pool automatically scales, i.e. -enableAutoScale is true. -""") - @visibility("read","create") + @doc("A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.") + @visibility("read") autoScaleFormula?: string; - @doc(""" -This property is set only if the Pool automatically scales, i.e. -enableAutoScale is true. -""") - @visibility("read","create") + @doc("The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.") + @visibility("read") autoScaleEvaluationInterval?: duration; - @doc(""" -This property is set only if the Pool automatically scales, i.e. -enableAutoScale is true. -""") + @doc("The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.") @visibility("read") autoScaleRun?: AutoScaleRun; - @doc(""" -This imposes restrictions on which Compute Nodes can be assigned to the Pool. -Specifying this value can reduce the chance of the requested number of Compute -Nodes to be allocated in the Pool. -""") - @visibility("read","create") + @doc("Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool.") + @visibility("read") enableInterNodeCommunication?: boolean; - @doc("The network configuration for a Pool.") - @visibility("read","create") + @doc("The network configuration for the Pool.") + @visibility("read") networkConfiguration?: NetworkConfiguration; - @doc(""" -Batch will retry Tasks when a recovery operation is triggered on a Node. -Examples of recovery operations include (but are not limited to) when an -unhealthy Node is rebooted or a Compute Node disappeared due to host failure. -Retries due to recovery operations are independent of and are not counted -against the maxTaskRetryCount. 
Even if the maxTaskRetryCount is 0, an internal -retry due to a recovery operation may occur. Because of this, all Tasks should -be idempotent. This means Tasks need to tolerate being interrupted and -restarted without causing any corruption or duplicate data. The best practice -for long running Tasks is to use some form of checkpointing. In some cases the -StartTask may be re-run even though the Compute Node was not rebooted. Special -care should be taken to avoid StartTasks which create breakaway process or -install/launch services from the StartTask working directory, as this will -block Batch from being able to re-run the StartTask. -""") + @doc("A Task specified to run on each Compute Node as it joins the Pool.") startTask?: StartTask; @doc(""" -For Windows Nodes, the Batch service installs the Certificates to the specified -Certificate store and location. For Linux Compute Nodes, the Certificates are -stored in a directory inside the Task working directory and an environment -variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this -location. For Certificates with visibility of 'remoteUser', a 'certs' directory -is created in the user's home directory (e.g., /home/{user-name}/certs) and -Certificates are placed in that directory. +For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. +For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. +For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. +Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. """) + @visibility("read") certificateReferences?: CertificateReference[]; - @doc(""" -Changes to Package references affect all new Nodes joining the Pool, but do not -affect Compute Nodes that are already in the Pool until they are rebooted or -reimaged. There is a maximum of 10 Package references on any given Pool. -""") + @doc("The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool.") + @visibility("read") applicationPackageReferences?: ApplicationPackageReference[]; - @doc(""" -The list of application licenses must be a subset of available Batch service -application licenses. If a license is requested which is not supported, Pool -creation will fail. -""") - @visibility("read","create") + @doc("The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail.") + @visibility("read") applicationLicenses?: string[]; - @doc(""" -The default value is 1. The maximum value is the smaller of 4 times the number -of cores of the vmSize of the pool or 256. 
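The taskSlotsPerNode ceiling in the doc above is worth spelling out; a tiny helper makes the arithmetic concrete (core counts are illustrative):

```ts
// Largest permissible taskSlotsPerNode for a given VM size:
// the smaller of 4x the core count and the absolute cap of 256.
const maxTaskSlotsPerNode = (coresPerNode: number): number =>
  Math.min(4 * coresPerNode, 256);

maxTaskSlotsPerNode(4);  // 16  -- e.g. a 4-core VM size
maxTaskSlotsPerNode(96); // 256 -- 4 * 96 = 384 is clamped to the ceiling
```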
-""") - @visibility("read","create") + @doc("The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.") + @visibility("read") taskSlotsPerNode?: int32; - @doc("If not specified, the default is spread.") - @visibility("read","create") + @doc("How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.") + @visibility("read") taskSchedulingPolicy?: TaskSchedulingPolicy; @doc("The list of user Accounts to be created on each Compute Node in the Pool.") - @visibility("read","create") + @visibility("read") userAccounts?: UserAccount[]; @doc("A list of name-value pairs associated with the Pool as metadata.") + @visibility("read") metadata?: MetadataItem[]; - @doc(""" -This property is populated only if the CloudPool was retrieved with an expand -clause including the 'stats' attribute; otherwise it is null. The statistics -may not be immediately available. The Batch service performs periodic roll-up -of statistics. The typical delay is about 30 minutes. -""") + @doc("Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.") @visibility("read") stats?: PoolStatistics; - @doc("This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.") - @visibility("read","create") + @doc("A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.") + @visibility("read") mountConfiguration?: MountConfiguration[]; - @doc(""" -The list of user identities associated with the Batch pool. The user identity -dictionary key references will be ARM resource ids in the form: -'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. -""") + @doc("The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.") @visibility("read") identity?: BatchPoolIdentity; - @doc("If omitted, the default value is Default.") + @doc("The desired node communication mode for the pool. If omitted, the default value is Default.") targetNodeCommunicationMode?: NodeCommunicationMode; - @doc("Determines how a pool communicates with the Batch service.") + @doc("The current state of the pool communication mode.") @visibility("read") currentNodeCommunicationMode?: NodeCommunicationMode; } -@doc("An error that occurred when resizing a Pool.") -model ResizeError { +@doc("Options for creating an Azure Batch Pool.") +model BatchPoolCreateOptions { + @doc("A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case).") + id: string; + + @doc("The display name for the Pool. 
The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.") + displayName?: string; + + @doc("The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines for Cloud Services Pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and A2V2. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).") + vmSize: string; + + @doc("The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.") + cloudServiceConfiguration?: CloudServiceConfiguration; + + @doc("The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified.") + virtualMachineConfiguration?: VirtualMachineConfiguration; + + @doc("The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).") + resizeTimeout?: duration; + + @doc("The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.") + targetDedicatedNodes?: int32; + + @doc("The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.") + targetLowPriorityNodes?: int32; + + @doc("Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false.") + enableAutoScale?: boolean; + + @doc("A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. 
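To make the mutual-exclusion and scaling rules above concrete, here is a sketch of a fixed-size BatchPoolCreateOptions payload using virtualMachineConfiguration (the image, node agent SKU, VM size, and counts are placeholders, not recommendations):

```ts
// Sketch: exactly one of cloudServiceConfiguration / virtualMachineConfiguration
// is set; enableAutoScale is false, so at least one target*Nodes value is given
// and autoScaleFormula is omitted.
const poolCreateOptions = {
  id: "my-pool",
  vmSize: "STANDARD_D2S_V3",
  virtualMachineConfiguration: {
    imageReference: {
      publisher: "canonical",
      offer: "0001-com-ubuntu-server-jammy",
      sku: "22_04-lts",
      version: "latest"
    },
    nodeAgentSKUId: "batch.node.ubuntu 22.04"
  },
  resizeTimeout: "PT15M", // ISO 8601 duration; values under PT5M are rejected
  targetDedicatedNodes: 2,
  targetLowPriorityNodes: 0,
  enableAutoScale: false,
  taskSlotsPerNode: 2
};
```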
For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).") + autoScaleFormula?: string; + + @doc("The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).") + autoScaleEvaluationInterval?: duration; + + @doc("Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.") + enableInterNodeCommunication?: boolean; + + @doc("The network configuration for the Pool.") + networkConfiguration?: NetworkConfiguration; + + @doc("A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted.") + startTask?: StartTask; + @doc(""" -An identifier for the Pool resize error. Codes are invariant and are intended -to be consumed programmatically. +For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. +For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. +For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. +Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. """) - code?: string; + certificateReferences?: CertificateReference[]; + + @doc("The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool.") + applicationPackageReferences?: ApplicationPackageReference[]; + + @doc("The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail.") + applicationLicenses?: string[]; + + @doc("The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.") + taskSlotsPerNode?: int32; + + @doc("How Tasks are distributed across Compute Nodes in a Pool. 
If not specified, the default is spread.") + taskSchedulingPolicy?: TaskSchedulingPolicy; + + @doc("The list of user Accounts to be created on each Compute Node in the Pool.") + userAccounts?: UserAccount[]; + + @doc("A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.") + metadata?: MetadataItem[]; + + @doc("Mount storage using the specified file system for the entire lifetime of the pool. Mount the storage using an Azure fileshare, NFS, CIFS, or Blobfuse based file system.") + mountConfiguration?: MountConfiguration[]; + + @doc("The desired node communication mode for the pool. If omitted, the default value is Default.") + targetNodeCommunicationMode?: NodeCommunicationMode; +} + + +@doc("Options for replacing properties on an Azure Batch Pool.") +model BatchPoolReplaceOptions { + @doc("A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool.") + startTask?: StartTask; @doc(""" -An identifier for the Pool resize error. Codes are invariant and are intended -to be consumed programmatically. +This list replaces any existing Certificate references configured on the Pool. +If you specify an empty collection, any existing Certificate references are removed from the Pool. +For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. +For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. +For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. +Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. """) - code?: string; + certificateReferences: CertificateReference[]; + + @doc("The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Package references are removed from the Pool.") + applicationPackageReferences: ApplicationPackageReference[]; + + @doc("A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool.") + metadata: MetadataItem[]; + + @doc("The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunicationMode setting on the Pool.
If omitted, the setting is reset to its default value.") + targetNodeCommunicationMode?: NodeCommunicationMode; +} + + +@doc("Options for updating an Azure Batch Pool.") +model BatchPoolUpdateOptions { + @doc("A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged.") + startTask?: StartTask; + + @doc(""" +If this element is present, it replaces any existing Certificate references configured on the Pool. +If omitted, any existing Certificate references are left unchanged. +For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. +For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. +For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. +Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. +""") + certificateReferences?: CertificateReference[]; + + @doc("A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged.") + applicationPackageReferences?: ApplicationPackageReference[]; + + @doc("A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged.") + metadata?: MetadataItem[]; + + @doc("The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, the existing setting is left unchanged.") + targetNodeCommunicationMode?: NodeCommunicationMode; +} + +@doc("An error that occurred when resizing a Pool.") +model ResizeError { + @doc("An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically.") + code?: string; + + @doc("A message describing the Pool resize error, intended to be suitable for display in a user interface.") message?: string; @doc("A list of additional error details related to the Pool resize error.") @@ -3858,33 +2943,21 @@ in a user interface. @doc("The results and errors from an execution of a Pool autoscale formula.") model AutoScaleRun { @doc("The time at which the autoscale formula was last evaluated.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key timestamp: utcDateTime; - @doc(""" -Each variable value is returned in the form $variable=value, and variables are -separated by semicolons.
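Because the results string described here is just semicolon-separated `$variable=value` pairs, it is easy to pull apart; a small parser sketch:

```ts
// Parse an AutoScaleRun.results string, e.g.
// "$TargetDedicatedNodes=10;$NodeDeallocationOption=requeue", into a map.
function parseAutoScaleResults(results: string): Map<string, string> {
  const vars = new Map<string, string>();
  for (const pair of results.split(";")) {
    const eq = pair.indexOf("=");
    if (eq < 0) continue; // skip empty or malformed segments
    vars.set(pair.slice(0, eq).trim(), pair.slice(eq + 1).trim());
  }
  return vars;
}

parseAutoScaleResults("$TargetDedicatedNodes=10;$NodeDeallocationOption=requeue")
  .get("$TargetDedicatedNodes"); // "10"
```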
-""") + @doc("The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons.") results?: string; - @doc("An error that occurred when executing or evaluating a Pool autoscale formula.") + @doc("Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful.") error?: AutoScaleRunError; } @doc("An error that occurred when executing or evaluating a Pool autoscale formula.") model AutoScaleRunError { - @doc(""" -An identifier for the autoscale error. Codes are invariant and are intended to -be consumed programmatically. -""") + @doc("An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically.") code?: string; - @doc(""" -A message describing the autoscale error, intended to be suitable for display -in a user interface. -""") + @doc("A message describing the autoscale error, intended to be suitable for display in a user interface.") message?: string; @doc("A list of additional error details related to the autoscale error.") @@ -3893,24 +2966,16 @@ in a user interface. @doc("The identity of the Batch pool, if configured.") model BatchPoolIdentity { - @doc(""" -The list of user identities associated with the Batch pool. The user identity -dictionary key references will be ARM resource ids in the form: -'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. -""") + @doc("The type of identity used for the Batch Pool.") type: PoolIdentityType; - @doc(""" -The user identity dictionary key references will be ARM resource ids in the -form: -'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. -""") + @doc("The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.") userAssignedIdentities?: UserAssignedIdentity[]; } @doc("The user assigned Identity") model UserAssignedIdentity { - @doc("The ARM resource id of the user assigned identity") + @doc("The ARM resource id of the user assigned identity.") resourceId: string; @doc("The client id of the user assigned identity.") @@ -3929,81 +2994,52 @@ model BatchPoolListResult { @items value?: BatchPool[]; + #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The URL to get the next set of results.") @nextLink - "odata.nextLink"?: string; + `odata.nextLink`?: string; } - -@doc("Options for enabling automatic scaling on a Pool.") -model BatchPoolEnableAutoScaleParameters { - @doc(""" -The formula is checked for validity before it is applied to the Pool. If the -formula is not valid, the Batch service rejects the request with detailed error -information.
For more information about specifying this formula, see -Automatically scale Compute Nodes in an Azure Batch Pool -(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). -""") +@doc("Options for enabling automatic scaling on an Azure Batch Pool.") +model BatchPoolEnableAutoScaleOptions { + @doc("The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).") autoScaleFormula?: string; - @doc(""" -The default value is 15 minutes. The minimum and maximum value are 5 minutes -and 168 hours respectively. If you specify a value less than 5 minutes or -greater than 168 hours, the Batch service rejects the request with an invalid -property value error; if you are calling the REST API directly, the HTTP status -code is 400 (Bad Request). If you specify a new interval, then the existing -autoscale evaluation schedule will be stopped and a new autoscale evaluation -schedule will be started, with its starting time being the time when this -request was issued. -""") + @doc("The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued.") autoScaleEvaluationInterval?: duration; } -@doc("Options for evaluating an automatic scaling formula on a Pool.") -model BatchPoolEvaluateAutoScaleParameters { - @doc(""" -The formula is validated and its results calculated, but it is not applied to -the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a -Pool'. For more information about specifying this formula, see Automatically -scale Compute Nodes in an Azure Batch Pool -(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). -""") +@doc("Options for evaluating an automatic scaling formula on an Azure Batch Pool.") +model BatchPoolEvaluateAutoScaleOptions { + @doc("The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. 
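The evaluate/enable pair modeled here supports a dry-run workflow: validate a formula against the live Pool first, then apply it. A sketch, assuming the standard data-plane paths and a formula in the documented autoscale language (endpoint shape, api-version, and all concrete values are illustrative):

```ts
// A pending-tasks formula in Batch's autoscale language, kept as a string.
const formula = `
  samplePct = $PendingTasks.GetSamplePercent(TimeInterval_Minute * 15);
  $TargetDedicatedNodes = samplePct < 70
    ? 1
    : min(25, avg($PendingTasks.GetSample(TimeInterval_Minute * 15)));
`;

async function dryRunThenEnable(batchUrl: string, poolId: string, token: string) {
  const headers = {
    Authorization: `Bearer ${token}`,
    "Content-Type": "application/json; odata=minimalmetadata"
  };
  // 1. Evaluate: the formula is validated and calculated but NOT applied.
  const evaluated = await fetch(
    `${batchUrl}/pools/${poolId}/evaluateautoscale?api-version=2023-05-01.17.0`,
    { method: "POST", headers, body: JSON.stringify({ autoScaleFormula: formula }) }
  );
  console.log(await evaluated.json()); // AutoScaleRun: timestamp, results, error
  // 2. Enable: apply the formula, re-evaluated every 10 minutes.
  await fetch(
    `${batchUrl}/pools/${poolId}/enableautoscale?api-version=2023-05-01.17.0`,
    {
      method: "POST",
      headers,
      body: JSON.stringify({
        autoScaleFormula: formula,
        autoScaleEvaluationInterval: "PT10M" // allowed range: PT5M to PT168H
      })
    }
  );
}
```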
For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).") autoScaleFormula: string; } -@doc("Options for changing the size of a Pool.") -model BatchPoolResizeParameters { +@doc("Options for changing the size of an Azure Batch Pool.") +model BatchPoolResizeOptions { @doc("The desired number of dedicated Compute Nodes in the Pool.") targetDedicatedNodes?: int32; @doc("The desired number of Spot/Low-priority Compute Nodes in the Pool.") targetLowPriorityNodes?: int32; - @doc(""" -The default value is 15 minutes. The minimum value is 5 minutes. If you specify -a value less than 5 minutes, the Batch service returns an error; if you are -calling the REST API directly, the HTTP status code is 400 (Bad Request). -""") + @doc("The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).") resizeTimeout?: duration; - @doc("The default value is requeue.") - nodeDeallocationOption?: ComputeNodeDeallocationOption; + @doc("Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue.") + nodeDeallocationOption?: BatchNodeDeallocationOption; } -@doc("Options for removing Compute Nodes from a Pool.") -model NodeRemoveParameters { - @doc("A maximum of 100 nodes may be removed per request.") +@doc("Options for removing nodes from an Azure Batch Pool.") +model NodeRemoveOptions { + @doc("A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request.") nodeList: string[]; - @doc(""" -The default value is 15 minutes. The minimum value is 5 minutes. If you specify -a value less than 5 minutes, the Batch service returns an error; if you are -calling the REST API directly, the HTTP status code is 400 (Bad Request). -""") + @doc("The timeout for removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).") resizeTimeout?: duration; - @doc("The default value is requeue.") - nodeDeallocationOption?: ComputeNodeDeallocationOption; + @doc("Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue.") + nodeDeallocationOption?: BatchNodeDeallocationOption; } @doc(""" Batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to host failure. Retries due to recovery operations are independent of and are not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to a recovery operation may occur. Because of this, all Tasks should be idempotent. This means Tasks need to tolerate being interrupted and restarted without causing any corruption or duplicate data. The best practice for long running Tasks is to use some form of checkpointing. """) model BatchTask { - @doc(""" -The ID can contain any combination of alphanumeric characters including hyphens -and underscores, and cannot contain more than 64 characters. -""") + @doc("A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters.") + @visibility("read") id?: string; - @doc(""" -The display name need not be unique and can contain any Unicode characters up -to a maximum length of 1024.
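Returning to the resize and node-removal options defined just above: a shrink request usually pairs a lower target with an explicit deallocation choice. A brief sketch, under the same hypothetical endpoint conventions as the earlier snippets:

```ts
// Shrink a pool to one dedicated node, requeueing tasks from removed nodes.
async function shrinkPool(batchUrl: string, poolId: string, token: string) {
  await fetch(`${batchUrl}/pools/${poolId}/resize?api-version=2023-05-01.17.0`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${token}`,
      "Content-Type": "application/json; odata=minimalmetadata"
    },
    body: JSON.stringify({
      targetDedicatedNodes: 1,
      targetLowPriorityNodes: 0,
      resizeTimeout: "PT15M",           // minimum PT5M
      nodeDeallocationOption: "requeue" // the documented default
    })
  });
}
```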
-""") + @doc("A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.") + @visibility("read") displayName?: string; @doc("The URL of the Task.") @visibility("read") url?: string; - @doc(""" -This is an opaque string. You can use it to detect whether the Task has changed -between requests. In particular, you can be pass the ETag when updating a Task -to specify that your changes should take effect only if nobody else has -modified the Task in the meantime. -""") + @doc("The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime.") @visibility("read") eTag?: string; @@ -4052,9 +3079,10 @@ modified the Task in the meantime. creationTime?: utcDateTime; @doc("How the Batch service should respond when the Task completes.") + @visibility("read") exitConditions?: ExitConditions; - @doc("The state of the Task.") + @doc("The current state of the Task.") @visibility("read") state?: TaskState; @@ -4062,160 +3090,146 @@ modified the Task in the meantime. @visibility("read") stateTransitionTime?: utcDateTime; - @doc("This property is not set if the Task is in its initial Active state.") + @doc("The previous state of the Task. This property is not set if the Task is in its initial Active state.") @visibility("read") previousState?: TaskState; - @doc("This property is not set if the Task is in its initial Active state.") + @doc("The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state.") @visibility("read") previousStateTransitionTime?: utcDateTime; - @doc(""" -For multi-instance Tasks, the command line is executed as the primary Task, -after the primary Task and all subtasks have finished executing the -coordination command line. The command line does not run under a shell, and -therefore cannot take advantage of shell features such as environment variable -expansion. If you want to take advantage of such features, you should invoke -the shell in the command line, for example using \"cmd /c MyCommand\" in -Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to -file paths, it should use a relative path (relative to the Task working -directory), or use the Batch provided environment variable -(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). -""") + @doc("The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).") + @visibility("read") commandLine?: string; - @doc(""" -If the Pool that will run this Task has containerConfiguration set, this must -be set as well.
If the Pool that will run this Task doesn't have -containerConfiguration set, this must not be set. When this is specified, all -directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure -Batch directories on the node) are mapped into the container, all Task -environment variables are mapped into the container, and the Task command line -is executed in the container. Files produced in the container outside of -AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that -Batch file APIs will not be able to access those files. -""") + @doc("The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.") + @visibility("read") containerSettings?: TaskContainerSettings; - @doc(""" -For multi-instance Tasks, the resource files will only be downloaded to the -Compute Node on which the primary Task is executed. There is a maximum size for -the list of resource files. When the max size is exceeded, the request will -fail and the response error code will be RequestEntityTooLarge. If this occurs, -the collection of ResourceFiles must be reduced in size. This can be achieved -using .zip files, Application Packages, or Docker Containers. -""") + @doc("A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.") + @visibility("read") resourceFiles?: ResourceFile[]; - @doc(""" -For multi-instance Tasks, the files will only be uploaded from the Compute Node -on which the primary Task is executed. -""") + @doc("A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.") + @visibility("read") outputFiles?: OutputFile[]; @doc("A list of environment variable settings for the Task.") + @visibility("read") environmentSettings?: EnvironmentSetting[]; - @doc(""" -A locality hint that can be used by the Batch service to select a Compute Node -on which to start a Task. -""") + @doc("A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task.") + @visibility("read") affinityInfo?: AffinityInformation; - @doc("Execution constraints to apply to a Task.") + @doc("The execution constraints that apply to this Task.") constraints?: TaskConstraints; - @doc(""" -The default is 1. 
A Task can only be scheduled to run on a compute node if the -node has enough free scheduling slots available. For multi-instance Tasks, this -must be 1. -""") + @doc("The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1.") + @visibility("read") requiredSlots?: int32; - @doc("If omitted, the Task runs as a non-administrative user unique to the Task.") + @doc("The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task.") + @visibility("read") userIdentity?: UserIdentity; - @doc("Information about the execution of a Task.") + @doc("Information about the execution of the Task.") @visibility("read") executionInfo?: TaskExecutionInformation; - @doc("Information about the Compute Node on which a Task ran.") + @doc("Information about the Compute Node on which the Task ran.") @visibility("read") - nodeInfo?: ComputeNodeInformation; + nodeInfo?: BatchNodeInformation; - @doc(""" -Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, -if any of the subtasks fail (for example due to exiting with a non-zero exit -code) the entire multi-instance Task fails. The multi-instance Task is then -terminated and retried, up to its retry limit. -""") + @doc("An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task.") + @visibility("read") multiInstanceSettings?: MultiInstanceSettings; - @doc("Resource usage statistics for a Task.") + @doc("Resource usage statistics for the Task.") @visibility("read") stats?: TaskStatistics; - @doc(""" -This Task will not be scheduled until all Tasks that it depends on have -completed successfully. If any of those Tasks fail and exhaust their retry -counts, this Task will never be scheduled. -""") + @doc("The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled.") + @visibility("read") dependsOn?: TaskDependencies; - @doc(""" -Application packages are downloaded and deployed to a shared directory, not the -Task working directory. Therefore, if a referenced package is already on the -Node, and is up to date, then it is not re-downloaded; the existing copy on the -Compute Node is used. If a referenced Package cannot be installed, for example -because the package has been deleted or because download failed, the Task -fails. -""") + @doc("A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails.") + @visibility("read") applicationPackageReferences?: ApplicationPackageReference[]; - @doc(""" -If this property is set, the Batch service provides the Task with an -authentication token which can be used to authenticate Batch service operations -without requiring an Account access key. 
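As a concrete illustration of the token mechanism in the doc above, a Node.js task can pick the token up from its environment and call back into the service. A sketch: AZ_BATCH_AUTHENTICATION_TOKEN is the variable documented here, AZ_BATCH_JOB_ID and AZ_BATCH_ACCOUNT_URL are standard Batch compute-node environment variables, and the rest is illustrative:

```ts
// Inside a running Task (Node.js 18+): authenticate to the Batch service with
// the task-scoped token instead of an account key. Only the operations granted
// by the task's authenticationTokenSettings will succeed.
async function checkParentJobState(): Promise<string> {
  const token = process.env.AZ_BATCH_AUTHENTICATION_TOKEN;
  const jobId = process.env.AZ_BATCH_JOB_ID;
  const accountUrl = process.env.AZ_BATCH_ACCOUNT_URL;
  if (!token || !jobId || !accountUrl) {
    throw new Error("Not running inside an Azure Batch Task");
  }
  const base = accountUrl.replace(/\/+$/, ""); // tolerate a trailing slash
  const res = await fetch(`${base}/jobs/${jobId}?api-version=2023-05-01.17.0`, {
    headers: { Authorization: `Bearer ${token}` }
  });
  const job = await res.json();
  return job.state; // e.g. "active"
}
```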
The token is provided via the -AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the -Task can carry out using the token depend on the settings. For example, a Task -can request Job permissions in order to add other Tasks to the Job, or check -the status of the Job or of other Tasks under the Job. + @doc("The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job.") + @visibility("read") + authenticationTokenSettings?: AuthenticationTokenSettings; +} + +@doc(""" +Options for creating an Azure Batch Task. """) +model BatchTaskCreateOptions { + @doc("A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case).") + id: string; + + @doc("A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.") + displayName?: string; + + @doc("How the Batch service should respond when the Task completes.") + exitConditions?: ExitConditions; + + @doc("The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).") + commandLine: string; + + @doc("The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.") + containerSettings?: TaskContainerSettings; + + @doc("A list of files that the Batch service will download to the Compute Node before running the command line. 
For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.") + resourceFiles?: ResourceFile[]; + + @doc("A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.") + outputFiles?: OutputFile[]; + + @doc("A list of environment variable settings for the Task.") + environmentSettings?: EnvironmentSetting[]; + + @doc("A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task.") + affinityInfo?: AffinityInformation; + + @doc("The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days.") + constraints?: TaskConstraints; + + @doc("The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1.") + requiredSlots?: int32; + + @doc("The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task.") + userIdentity?: UserIdentity; + + @doc("An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task.") + multiInstanceSettings?: MultiInstanceSettings; + + @doc("The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob.") + dependsOn?: TaskDependencies; + + @doc("A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails.") + applicationPackageReferences?: ApplicationPackageReference[]; + + @doc("The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings.
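Pulling several of the create-options fields above into one literal shows how they compose; a sketch of a task-create body (IDs, paths, and values are illustrative):

```ts
// Sketch: the command line gets an explicit shell (there is no implicit shell,
// so no env-var expansion otherwise); constraints bound retries and retention;
// dependsOn requires the parent Job to set usesTaskDependencies = true.
const taskCreateOptions = {
  id: "process-chunk-001",
  commandLine: '/bin/sh -c "echo $AZ_BATCH_TASK_ID && ./run.sh input-001.dat"',
  requiredSlots: 1, // the default; must stay 1 for multi-instance tasks
  constraints: {
    maxTaskRetryCount: 3,
    retentionTime: "P7D" // keep the task directory 7 days after completion
  },
  dependsOn: { taskIds: ["prepare-data"] },
  authenticationTokenSettings: { access: ["job"] } // token limited to Job operations
};
```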
For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job.") authenticationTokenSettings?: AuthenticationTokenSettings; } @doc("Specifies how the Batch service should respond when the Task completes.") model ExitConditions { - @doc(""" -A list of individual Task exit codes and how the Batch service should respond -to them. -""") + @doc("A list of individual Task exit codes and how the Batch service should respond to them.") exitCodes?: ExitCodeMapping[]; - @doc(""" -A list of Task exit code ranges and how the Batch service should respond to -them. -""") + @doc("A list of Task exit code ranges and how the Batch service should respond to them.") exitCodeRanges?: ExitCodeRangeMapping[]; - @doc("Specifies how the Batch service responds to a particular exit condition.") + @doc("How the Batch service should respond if the Task fails to start due to an error.") preProcessingError?: ExitOptions; - @doc(""" -If the Task exited with an exit code that was specified via exitCodes or -exitCodeRanges, and then encountered a file upload error, then the action -specified by the exit code takes precedence. -""") + @doc("How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence.") fileUploadError?: ExitOptions; - @doc(""" -This value is used if the Task exits with any nonzero exit code not listed in -the exitCodes or exitCodeRanges collection, with a pre-processing error if the -preProcessingError property is not present, or with a file upload error if the -fileUploadError property is not present. If you want non-default behavior on -exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges -collection. -""") + @doc("How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection.") default?: ExitOptions; } @@ -4227,26 +3241,16 @@ model ExitCodeMapping { @doc("A process exit code.") code: int32; - @doc("Specifies how the Batch service responds to a particular exit condition.") + @doc("How the Batch service should respond if the Task exits with this exit code.") exitOptions: ExitOptions; } @doc("Specifies how the Batch service responds to a particular exit condition.") model ExitOptions { - @doc(""" -The default is none for exit code 0 and terminate for all other exit -conditions. If the Job's onTaskFailed property is noaction, then specifying -this property returns an error and the add Task request fails with an invalid -property value error; if you are calling the REST API directly, the HTTP status -code is 400 (Bad Request). -""") + @doc("An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. 
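The precedence rules spelled out here are easier to see in a literal ExitConditions value (codes and actions are illustrative):

```ts
// Exit handling for a task. Exit code 0 succeeds unless listed explicitly;
// any other code not matched below falls through to `default`.
const exitConditions = {
  exitCodes: [
    // A known-retryable code: leave the Job alone, make dependents wait.
    { code: 1, exitOptions: { jobAction: "none", dependencyAction: "block" } }
  ],
  exitCodeRanges: [
    // Fatal configuration errors: stop the whole Job.
    { start: 2, end: 4, exitOptions: { jobAction: "terminate" } }
  ],
  // If the task also exited with a listed code, that code's action wins.
  fileUploadError: { jobAction: "none" },
  default: { jobAction: "terminate", dependencyAction: "block" }
};
```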
If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).") jobAction?: JobAction; - @doc(""" -Possible values are 'satisfy' (allowing dependent tasks to progress) and -'block' (dependent tasks continue to wait). Batch does not yet support -cancellation of dependent tasks. -""") + @doc("An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks.") dependencyAction?: DependencyAction; } @@ -4261,7 +3265,7 @@ model ExitCodeRangeMapping { @doc("The last exit code in the range.") end: int32; - @doc("Specifies how the Batch service responds to a particular exit condition.") + @doc("How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive).") exitOptions: ExitOptions; } @@ -4270,92 +3274,49 @@ A locality hint that can be used by the Batch service to select a Compute Node on which to start a Task. """) model AffinityInformation { - @doc(""" -You can pass the affinityId of a Node to indicate that this Task needs to run -on that Compute Node. Note that this is just a soft affinity. If the target -Compute Node is busy or unavailable at the time the Task is scheduled, then the -Task will be scheduled elsewhere. -""") + @doc("An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere.") affinityId: string; } @doc("Information about the execution of a Task.") model TaskExecutionInformation { - @doc(""" -'Running' corresponds to the running state, so if the Task specifies resource -files or Packages, then the start time reflects the time at which the Task -started downloading or deploying these. If the Task has been restarted or -retried, this is the most recent time at which the Task started running. This -property is present only for Tasks that are in the running or completed state. -""") + @doc("The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state.") startTime?: utcDateTime; - @doc("This property is set only if the Task is in the Completed state.") + @doc("The time at which the Task completed. This property is set only if the Task is in the Completed state.") endTime?: utcDateTime; - @doc(""" -This property is set only if the Task is in the completed state. In general, -the exit code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. 
However, if the Batch service terminates the Task (due -to timeout, or user termination via the API) you may see an operating -system-defined exit code. -""") + @doc("The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code.") exitCode?: int32; - @doc("This property is set only if the Task runs in a container context.") + @doc("Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.") containerInfo?: TaskContainerExecutionInformation; - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") + @doc("Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.") failureInfo?: TaskFailureInformation; - @doc(""" -Task application failures (non-zero exit code) are retried, pre-processing -errors (the Task could not be run) and file upload errors are not retried. The -Batch service will retry the Task up to the limit specified by the constraints. -""") + @doc("The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.") retryCount: int32; - @doc(""" -This element is present only if the Task was retried (i.e. retryCount is -nonzero). If present, this is typically the same as startTime, but may be -different if the Task has been restarted for reasons other than retry; for -example, if the Compute Node was rebooted during a retry, then the startTime is -updated but the lastRetryTime is not. -""") + @doc("The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.") lastRetryTime?: utcDateTime; - @doc(""" -When the user removes Compute Nodes from a Pool (by resizing/shrinking the -pool) or when the Job is being disabled, the user can specify that running -Tasks on the Compute Nodes be requeued for execution. This count tracks how -many times the Task has been requeued for these reasons. -""") + @doc("The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. 
This count tracks how many times the Task has been requeued for these reasons.") requeueCount: int32; - @doc("This property is set only if the requeueCount is nonzero.") + @doc("The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero.") lastRequeueTime?: utcDateTime; - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") + @doc("The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.") result?: TaskExecutionResult; } @doc("Information about the Compute Node on which a Task ran.") -model ComputeNodeInformation { - @doc(""" -An identifier for the Node on which the Task ran, which can be passed when -adding a Task to request that the Task be scheduled on this Compute Node. -""") +model BatchNodeInformation { + @doc("An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node.") affinityId?: string; - @doc("The URL of the Compute Node on which the Task ran. ") + @doc("The URL of the Compute Node on which the Task ran.") nodeUrl?: string; @doc("The ID of the Pool on which the Task ran.") @@ -4378,30 +3339,17 @@ code) the entire multi-instance Task fails. The multi-instance Task is then terminated and retried, up to its retry limit. """) model MultiInstanceSettings { - @doc("If omitted, the default is 1.") + @doc("The number of Compute Nodes required by the Task. If omitted, the default is 1.") numberOfInstances?: int32; - @doc(""" -A typical coordination command line launches a background service and verifies -that the service is ready to process inter-node messages. -""") + @doc("The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages.") coordinationCommandLine: string; - @doc(""" -The difference between common resource files and Task resource files is that -common resource files are downloaded for all subtasks including the primary, -whereas Task resource files are downloaded only for the primary. Also note that -these resource files are not downloaded to the Task working directory, but -instead are downloaded to the Task root directory (one directory above the -working directory). There is a maximum size for the list of resource files. -When the max size is exceeded, the request will fail and the response error -code will be RequestEntityTooLarge. If this occurs, the collection of -ResourceFiles must be reduced in size. This can be achieved using .zip files, -Application Packages, or Docker Containers. -""") + @doc("A list of files that the Batch service will download before running the coordination command line. The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. 
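Note: a hedged sketch of a multi-instance configuration as the consolidated doc text describes it. The command line and blob URL are placeholders, and the interfaces are hand-written approximations of the TypeSpec models.

```ts
interface ResourceFileLike {
  httpUrl: string;
  filePath: string;
}

interface MultiInstanceSettingsLike {
  numberOfInstances?: number; // the service defaults to 1 when omitted
  coordinationCommandLine: string;
  // Downloaded for every subtask, into the task root directory (one level
  // above the working directory), unlike per-task resource files.
  commonResourceFiles?: ResourceFileLike[];
}

const multiInstance: MultiInstanceSettingsLike = {
  numberOfInstances: 4,
  // Typically starts a background service and waits until it can accept
  // inter-node messages; this particular command is only a placeholder.
  coordinationCommandLine: "cmd /c start cmd /c coordination-service.exe",
  commonResourceFiles: [
    {
      httpUrl: "https://example.blob.core.windows.net/data/shared.dat?sv=...",
      filePath: "shared.dat",
    },
  ],
};
```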
When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.")
   commonResourceFiles?: ResourceFile[];
 }
 
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
 @doc("Resource usage statistics for a Task.")
 model TaskStatistics {
   @doc("The URL of the statistics.")
@@ -4410,30 +3358,16 @@ model TaskStatistics {
   @doc("The start time of the time range covered by the statistics.")
   startTime: utcDateTime;
 
-  @doc("""
-The time at which the statistics were last updated. All statistics are limited
-to the range between startTime and lastUpdateTime.
-""")
+  @doc("The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime.")
   lastUpdateTime: utcDateTime;
 
-  @doc("""
-The total user mode CPU time (summed across all cores and all Compute Nodes)
-consumed by the Task.
-""")
+  @doc("The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task.")
   userCPUTime: duration;
 
-  @doc("""
-The total kernel mode CPU time (summed across all cores and all Compute Nodes)
-consumed by the Task.
-""")
+  @doc("The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task.")
   kernelCPUTime: duration;
 
-  @doc("""
-The wall clock time is the elapsed time from when the Task started running on a
-Compute Node to when it finished (or to the last time the statistics were
-updated, if the Task had not finished by then). If the Task was retried, this
-includes the wall clock time of all the Task retries.
-""")
+  @doc("The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries.")
   wallClockTime: duration;
 
   @doc("The total number of disk read operations made by the Task.")
@@ -4448,12 +3382,7 @@ includes the wall clock time of all the Task retries.
   @doc("The total gibibytes written to disk by the Task.")
   writeIOGiB: float32;
 
-  @doc("""
-The total wait time of the Task. The wait time for a Task is defined as the
-elapsed time between the creation of the Task and the start of Task execution.
-(If the Task is retried due to failures, the wait time is the time to the most
-recent Task execution.)
-""")
+  @doc("The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.)")
   waitTime: duration;
 }
 
@@ -4463,18 +3392,10 @@ within a dependency range must complete before the dependant Task will be
 scheduled.
 """)
 model TaskDependencies {
-  @doc("""
-The taskIds collection is limited to 64000 characters total (i.e. the combined
-length of all Task IDs). If the taskIds collection exceeds the maximum length,
-the Add Task request fails with error code TaskDependencyListTooLong. In this
-case consider using Task ID ranges instead.
-""")
+  @doc("The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead.")
   taskIds?: string[];
 
-  @doc("""
-The list of Task ID ranges that this Task depends on. All Tasks in all ranges
-must complete successfully before the dependent Task can be scheduled.
-""")
+  @doc("The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled.")
   taskIdRanges?: TaskIdRange[];
 }
 
@@ -4497,20 +3418,16 @@ model BatchTaskListResult {
   @items
   value?: BatchTask[];
 
+  #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
   @doc("The URL to get the next set of results.")
   @nextLink
-  "odata.nextLink"?: string;
+  `odata.nextLink`?: string;
 }
 
 @doc("A collection of Azure Batch Tasks to add.")
 model BatchTaskCollection {
-  @doc("""
-The total serialized size of this collection must be less than 1MB. If it is
-greater than 1MB (for example if each Task has 100's of resource files or
-environment variables), the request will fail with code 'RequestBodyTooLarge'
-and should be retried again with fewer Tasks.
-""")
-  value: BatchTask[];
+  @doc("The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks.")
+  value: BatchTaskCreateOptions[];
 }
 
 @doc("The result of adding a collection of Tasks to a Job.")
@@ -4527,12 +3444,7 @@ model TaskAddResult {
   @doc("The ID of the Task for which this is the result.")
   taskId: string;
 
-  @doc("""
-You can use this to detect whether the Task has changed between requests. In
-particular, you can be pass the ETag with an Update Task request to specify
-that your changes should take effect only if nobody else has modified the Job
-in the meantime.
-""")
+  @doc("The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime.")
   eTag?: string;
 
   @doc("The last modified time of the Task.")
@@ -4541,7 +3453,7 @@ in the meantime.
   @doc("The URL of the Task, if the Task was successfully added.")
   location?: string;
 
-  @doc("An error response received from the Azure Batch service.")
+  @doc("The error encountered while attempting to add the Task.")
   error?: BatchError;
 }
 
@@ -4556,135 +3468,80 @@ model SubtaskInformation {
   @doc("The ID of the subtask.")
   id?: int32;
 
-  @doc("Information about the Compute Node on which a Task ran.")
-  nodeInfo?: ComputeNodeInformation;
+  @doc("Information about the Compute Node on which the subtask ran.")
+  nodeInfo?: BatchNodeInformation;
 
-  @doc("""
-The time at which the subtask started running. If the subtask has been
-restarted or retried, this is the most recent time at which the subtask started
-running.
-""")
+  @doc("The time at which the subtask started running. 
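Note: the 100-task / 1MB limits on BatchTaskCollection above suggest client-side chunking when adding many Tasks. A rough sketch under that assumption; addTaskCollection stands in for whatever method actually issues the request and is not a real generated API.

```ts
interface TaskCreateLike {
  id: string;
  commandLine: string;
}

async function addTasksInChunks(
  tasks: TaskCreateLike[],
  addTaskCollection: (value: TaskCreateLike[]) => Promise<void>
): Promise<void> {
  for (let i = 0; i < tasks.length; i += 100) {
    // A production version would also watch the serialized size and retry
    // 'RequestBodyTooLarge' responses with smaller chunks.
    await addTaskCollection(tasks.slice(i, i + 100));
  }
}
```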
If the subtask has been restarted or retried, this is the most recent time at which the subtask started running.") startTime?: utcDateTime; - @doc("This property is set only if the subtask is in the Completed state.") + @doc("The time at which the subtask completed. This property is set only if the subtask is in the Completed state.") endTime?: utcDateTime; - @doc(""" -This property is set only if the subtask is in the completed state. In general, -the exit code for a process reflects the specific convention implemented by the -application developer for that process. If you use the exit code value to make -decisions in your code, be sure that you know the exit code convention used by -the application process. However, if the Batch service terminates the subtask -(due to timeout, or user termination via the API) you may see an operating -system-defined exit code. -""") + @doc("The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code.") exitCode?: int32; - @doc("This property is set only if the Task runs in a container context.") + @doc("Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.") containerInfo?: TaskContainerExecutionInformation; - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") + @doc("Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.") failureInfo?: TaskFailureInformation; - @doc("The state of the subtask.") + @doc("The current state of the subtask.") state?: SubtaskState; @doc("The time at which the subtask entered its current state.") stateTransitionTime?: utcDateTime; - @doc("This property is not set if the subtask is in its initial running state.") + @doc("The previous state of the subtask. This property is not set if the subtask is in its initial running state.") previousState?: SubtaskState; - @doc("This property is not set if the subtask is in its initial running state.") + @doc("The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state.") previousStateTransitionTime?: utcDateTime; - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") + @doc("The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.") result?: TaskExecutionResult; } -@doc("A user Account for RDP or SSH access on a Compute Node.") -model ComputeNodeUser { +@doc("Options for creating a user account for RDP or SSH access on an Azure Batch Compute Node.") +model BatchNodeUserCreateOptions { @doc("The user name of the Account.") name: string; - @doc("The default value is false.") + @doc("Whether the Account should be an administrator on the Compute Node. The default value is false.") isAdmin?: boolean; - @doc(""" -If omitted, the default is 1 day from the current time. 
For Linux Compute -Nodes, the expiryTime has a precision up to a day. -""") + @doc("The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day.") expiryTime?: utcDateTime; - @doc(""" -The password is required for Windows Compute Nodes (those created with -'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' -using a Windows Image reference). For Linux Compute Nodes, the password can -optionally be specified along with the sshPublicKey property. -""") + @doc("The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property.") password?: string; - @doc(""" -The public key should be compatible with OpenSSH encoding and should be base 64 -encoded. This property can be specified only for Linux Compute Nodes. If this -is specified for a Windows Compute Node, then the Batch service rejects the -request; if you are calling the REST API directly, the HTTP status code is 400 -(Bad Request). -""") + @doc("The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).") sshPublicKey?: string; } -@doc("The set of changes to be made to a user Account on a Compute Node.") -model NodeUpdateUserParameters { - @doc(""" -The password is required for Windows Compute Nodes (those created with -'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' -using a Windows Image reference). For Linux Compute Nodes, the password can -optionally be specified along with the sshPublicKey property. If omitted, any -existing password is removed. -""") +@doc("Options for updating a user account for RDP or SSH access on an Azure Batch Compute Node.") +model BatchNodeUserUpdateOptions { + @doc("The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed.") password?: string; - @doc(""" -If omitted, the default is 1 day from the current time. For Linux Compute -Nodes, the expiryTime has a precision up to a day. -""") + @doc("The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day.") expiryTime?: utcDateTime; - @doc(""" -The public key should be compatible with OpenSSH encoding and should be base 64 -encoded. This property can be specified only for Linux Compute Nodes. If this -is specified for a Windows Compute Node, then the Batch service rejects the -request; if you are calling the REST API directly, the HTTP status code is 400 -(Bad Request). If omitted, any existing SSH public key is removed. -""") + @doc("The SSH public key that can be used for remote login to the Compute Node. 
The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed.") sshPublicKey?: string; } @doc("A Compute Node in the Batch service.") -model ComputeNode { - @doc(""" -Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a -Compute Node is removed from a Pool, all of its local files are deleted, and -the ID is reclaimed and could be reused for new Compute Nodes. -""") +model BatchNode { + @doc("The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes.") id?: string; @doc("The URL of the Compute Node.") url?: string; - @doc(""" -The Spot/Low-priority Compute Node has been preempted. Tasks which were running -on the Compute Node when it was preempted will be rescheduled when another -Compute Node becomes available. -""") - state?: ComputeNodeState; + @doc("The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available.") + state?: BatchNodeState; @doc("Whether the Compute Node is available for Task scheduling.") schedulingState?: SchedulingState; @@ -4692,119 +3549,60 @@ Compute Node becomes available. @doc("The time at which the Compute Node entered its current state.") stateTransitionTime?: utcDateTime; - @doc("This property may not be present if the Compute Node state is unusable.") + @doc("The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable.") lastBootTime?: utcDateTime; - @doc(""" -This is the time when the Compute Node was initially allocated and doesn't -change once set. It is not updated when the Compute Node is service healed or -preempted. -""") + @doc("The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted.") allocationTime?: utcDateTime; - @doc(""" -Every Compute Node that is added to a Pool is assigned a unique IP address. -Whenever a Compute Node is removed from a Pool, all of its local files are -deleted, and the IP address is reclaimed and could be reused for new Compute -Nodes. -""") + @doc("The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes.") ipAddress?: string; - @doc(""" -Note that this is just a soft affinity. If the target Compute Node is busy or -unavailable at the time the Task is scheduled, then the Task will be scheduled -elsewhere. -""") + @doc("An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. 
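Note: a sketch of a BatchNodeUserCreateOptions payload for a Linux node, following the docs above (password optional when sshPublicKey is supplied; expiry defaults to one day and has day precision on Linux). The key material is a placeholder.

```ts
const nodeUser = {
  name: "tmp-debug-user",
  isAdmin: false, // the documented default
  // Defaults to one day from the current time when omitted.
  expiryTime: new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString(),
  // Linux only; a Windows node rejects this with a 400 (Bad Request).
  sshPublicKey: "ssh-rsa AAAAB3NzaC1yc2E... placeholder-key",
};
```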
If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere.") affinityId?: string; - @doc(""" -For information about available sizes of virtual machines in Pools, see Choose -a VM size for Compute Nodes in an Azure Batch Pool -(https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). -""") + @doc("The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).") vmSize?: string; - @doc(""" -The total number of Job Tasks completed on the Compute Node. This includes Job -Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start -Tasks. -""") + @doc("The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks.") totalTasksRun?: int32; - @doc(""" -The total number of currently running Job Tasks on the Compute Node. This -includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job -Release or Start Tasks. -""") + @doc("The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks.") runningTasksCount?: int32; - @doc(""" -The total number of scheduling slots used by currently running Job Tasks on the -Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job -Preparation, Job Release or Start Tasks. -""") + @doc("The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks.") runningTaskSlotsCount?: int32; - @doc(""" -The total number of Job Tasks which completed successfully (with exitCode 0) on -the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job -Preparation, Job Release or Start Tasks. -""") + @doc("The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks.") totalTasksSucceeded?: int32; - @doc(""" -This property is present only if at least one Task has run on this Compute Node -since it was assigned to the Pool. -""") + @doc("A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool.") recentTasks?: TaskInformation[]; - @doc(""" -Batch will retry Tasks when a recovery operation is triggered on a Node. -Examples of recovery operations include (but are not limited to) when an -unhealthy Node is rebooted or a Compute Node disappeared due to host failure. -Retries due to recovery operations are independent of and are not counted -against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal -retry due to a recovery operation may occur. Because of this, all Tasks should -be idempotent. This means Tasks need to tolerate being interrupted and -restarted without causing any corruption or duplicate data. The best practice -for long running Tasks is to use some form of checkpointing. In some cases the -StartTask may be re-run even though the Compute Node was not rebooted. 
Special -care should be taken to avoid StartTasks which create breakaway process or -install/launch services from the StartTask working directory, as this will -block Batch from being able to re-run the StartTask. -""") + @doc("The Task specified to run on the Compute Node as it joins the Pool.") startTask?: StartTask; - @doc("Information about a StartTask running on a Compute Node.") + @doc("Runtime information about the execution of the StartTask on the Compute Node.") startTaskInfo?: StartTaskInformation; @doc(""" -For Windows Nodes, the Batch service installs the Certificates to the specified -Certificate store and location. For Linux Compute Nodes, the Certificates are -stored in a directory inside the Task working directory and an environment -variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this -location. For Certificates with visibility of 'remoteUser', a 'certs' directory -is created in the user's home directory (e.g., /home/{user-name}/certs) and -Certificates are placed in that directory. +For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. +For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. +For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. +Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. """) certificateReferences?: CertificateReference[]; @doc("The list of errors that are currently being encountered by the Compute Node.") - errors?: ComputeNodeError[]; + errors?: BatchNodeError[]; - @doc(""" -Whether this Compute Node is a dedicated Compute Node. If false, the Compute -Node is a Spot/Low-priority Compute Node. -""") + @doc("Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node.") isDedicated?: boolean; @doc("The endpoint configuration for the Compute Node.") - endpointConfiguration?: ComputeNodeEndpointConfiguration; + endpointConfiguration?: BatchNodeEndpointConfiguration; - @doc(""" -The Batch Compute Node agent is a program that runs on each Compute Node in the -Pool and provides Batch capability on the Compute Node. -""") + @doc("Information about the Compute Node agent version and the time the Compute Node upgraded to a new version.") nodeAgentInfo?: NodeAgentInformation; @doc("Info about the current state of the virtual machine.") @@ -4825,10 +3623,10 @@ model TaskInformation { @doc("The ID of the subtask if the Task is a multi-instance Task.") subtaskId?: int32; - @doc("The state of the Task.") + @doc("The current state of the Task.") taskState: TaskState; - @doc("Information about the execution of a Task.") + @doc("Information about the execution of the Task.") executionInfo?: TaskExecutionInformation; } @@ -4837,74 +3635,37 @@ model StartTaskInformation { @doc("The state of the StartTask on the Compute Node.") state: StartTaskState; - @doc(""" -This value is reset every time the Task is restarted or retried (that is, this -is the most recent time at which the StartTask started running). -""") + @doc("The time at which the StartTask started running. 
This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running).") startTime: utcDateTime; - @doc(""" -This is the end time of the most recent run of the StartTask, if that run has -completed (even if that run failed and a retry is pending). This element is not -present if the StartTask is currently running. -""") + @doc("The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running.") endTime?: utcDateTime; - @doc(""" -This property is set only if the StartTask is in the completed state. In -general, the exit code for a process reflects the specific convention -implemented by the application developer for that process. If you use the exit -code value to make decisions in your code, be sure that you know the exit code -convention used by the application process. However, if the Batch service -terminates the StartTask (due to timeout, or user termination via the API) you -may see an operating system-defined exit code. -""") + @doc("The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code.") exitCode?: int32; - @doc("This property is set only if the Task runs in a container context.") + @doc("Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.") containerInfo?: TaskContainerExecutionInformation; - @doc(""" -This property is set only if the Task is in the completed state and encountered -a failure. -""") + @doc("Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.") failureInfo?: TaskFailureInformation; - @doc(""" -Task application failures (non-zero exit code) are retried, pre-processing -errors (the Task could not be run) and file upload errors are not retried. The -Batch service will retry the Task up to the limit specified by the constraints. -""") + @doc("The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.") retryCount: int32; - @doc(""" -This element is present only if the Task was retried (i.e. retryCount is -nonzero). If present, this is typically the same as startTime, but may be -different if the Task has been restarted for reasons other than retry; for -example, if the Compute Node was rebooted during a retry, then the startTime is -updated but the lastRetryTime is not. -""") + @doc("The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). 
If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.") lastRetryTime?: utcDateTime; - @doc(""" -If the value is 'failed', then the details of the failure can be found in the -failureInfo property. -""") + @doc("The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.") result?: TaskExecutionResult; } @doc("An error encountered by a Compute Node.") -model ComputeNodeError { - @doc(""" -An identifier for the Compute Node error. Codes are invariant and are intended -to be consumed programmatically. -""") +model BatchNodeError { + @doc("An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically.") code?: string; - @doc(""" -A message describing the Compute Node error, intended to be suitable for -display in a user interface. -""") + @doc("A message describing the Compute Node error, intended to be suitable for display in a user interface.") message?: string; @doc("The list of additional error details related to the Compute Node error.") @@ -4912,11 +3673,12 @@ display in a user interface. } @doc("The endpoint configuration for the Compute Node.") -model ComputeNodeEndpointConfiguration { +model BatchNodeEndpointConfiguration { @doc("The list of inbound endpoints that are accessible on the Compute Node.") inboundEndpoints: InboundEndpoint[]; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("An inbound endpoint on a Compute Node.") model InboundEndpoint { @doc("The name of the endpoint.") @@ -4926,10 +3688,11 @@ model InboundEndpoint { protocol: InboundEndpointProtocol; @doc("The public IP address of the Compute Node.") - publicIPAddress: string; + @projectedName("client", "publicIpAddress") + publicIPAddress?: string; @doc("The public fully qualified domain name for the Compute Node.") - publicFQDN: string; + publicFQDN?: string; @doc("The public port number of the endpoint.") frontendPort: int32; @@ -4943,101 +3706,66 @@ The Batch Compute Node agent is a program that runs on each Compute Node in the Pool and provides Batch capability on the Compute Node. """) model NodeAgentInformation { - @doc(""" -This version number can be checked against the Compute Node agent release notes -located at -https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. -""") + @doc("The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md.") version: string; - @doc(""" -This is the most recent time that the Compute Node agent was updated to a new -version. -""") + @doc("The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version.") lastUpdateTime: utcDateTime; } @doc("Info about the current state of the virtual machine.") model VirtualMachineInfo { - @doc(""" -A reference to an Azure Virtual Machines Marketplace Image or a Shared Image -Gallery Image. To get the list of all Azure Marketplace Image references -verified by Azure Batch, see the 'List Supported Images' operation. 
-""") + @doc("The reference to the Azure Virtual Machine's Marketplace Image.") imageReference?: ImageReference; } -@doc("Options for rebooting a Compute Node.") -model NodeRebootParameters { - @doc("The default value is requeue.") - nodeRebootOption?: ComputeNodeRebootOption; +@doc("Options for rebooting an Azure Batch Compute Node.") +model NodeRebootOptions { + @doc("When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue.") + nodeRebootOption?: BatchNodeRebootOption; } -@doc("Options for reimaging a Compute Node.") -model NodeReimageParameters { - @doc("The default value is requeue.") - nodeReimageOption?: ComputeNodeReimageOption; +@doc("Options for reimaging an Azure Batch Compute Node.") +model NodeReimageOptions { + @doc("When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue.") + nodeReimageOption?: BatchNodeReimageOption; } -@doc("Options for disabling scheduling on a Compute Node.") -model NodeDisableSchedulingParameters { - @doc("The default value is requeue.") - nodeDisableSchedulingOption?: DisableComputeNodeSchedulingOption; +@doc("Options for disabling scheduling on an Azure Batch Compute Node.") +model NodeDisableSchedulingOptions { + @doc("What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue.") + nodeDisableSchedulingOption?: DisableBatchNodeSchedulingOption; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase" @doc("The remote login settings for a Compute Node.") -model ComputeNodeGetRemoteLoginSettingsResult { +model BatchNodeRemoteLoginSettingsResult { @doc("The IP address used for remote login to the Compute Node.") - // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one - @visibility("read") - @key + @projectedName("client", "remoteLoginIpAddress") remoteLoginIPAddress: string; @doc("The port used for remote login to the Compute Node.") remoteLoginPort: int32; } -@doc("The Azure Batch service log files upload configuration for a Compute Node.") -model UploadBatchServiceLogsConfiguration { - @doc(""" -If a user assigned managed identity is not being used, the URL must include a -Shared Access Signature (SAS) granting write permissions to the container. The -SAS duration must allow enough time for the upload to finish. The start time -for SAS is optional and recommended to not be specified. -""") +@doc("The Azure Batch service log files upload options for a Compute Node.") +model UploadBatchServiceLogsOptions { + @doc("The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified.") containerUrl: string; - @doc(""" -Any log file containing a log message in the time range will be uploaded. This -means that the operation might retrieve more logs than have been requested -since the entire log file is always uploaded, but the operation should not -retrieve fewer logs than have been requested. -""") + @doc("The start of the time range from which to upload Batch Service log file(s). 
Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested.")
   startTime: utcDateTime;
 
-  @doc("""
-Any log file containing a log message in the time range will be uploaded. This
-means that the operation might retrieve more logs than have been requested
-since the entire log file is always uploaded, but the operation should not
-retrieve fewer logs than have been requested. If omitted, the default is to
-upload all logs available after the startTime.
-""")
+  @doc("The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime.")
   endTime?: utcDateTime;
 
-  @doc("The identity must have write access to the Azure Blob Storage container.")
-  identityReference?: ComputeNodeIdentityReference;
+  @doc("The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container.")
+  identityReference?: BatchNodeIdentityReference;
 }
 
 @doc("The result of uploading Batch service log files from a specific Compute Node.")
-@resource("pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs")
 model UploadBatchServiceLogsResult {
-  @doc("""
-The virtual directory name is part of the blob name for each log file uploaded,
-and it is built based poolId, nodeId and a unique identifier.
-""")
-  // FIXME: (resource-key-guessing) - Verify that this property is the resource key, if not please update the model with the right one
-  @visibility("read")
-  @key
+  @doc("The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based on poolId, nodeId and a unique identifier.")
   virtualDirectoryName: string;
 
   @doc("The number of log files which will be uploaded.")
@@ -5046,28 +3774,31 @@ and it is built based poolId, nodeId and a unique identifier.
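Note: a sketch of an UploadBatchServiceLogsOptions payload as documented above. The container URL must carry a write-capable SAS unless a user assigned managed identity is used; the URL and times here are placeholders.

```ts
const uploadLogsOptions = {
  containerUrl:
    "https://example.blob.core.windows.net/batch-logs?sv=2021-08-06&sp=w&sig=...",
  // Whole log files overlapping this range are uploaded, so the service may
  // return more log data than the range strictly covers.
  startTime: new Date(Date.now() - 60 * 60 * 1000).toISOString(),
  // Optional; when omitted the service uploads everything after startTime.
  endTime: new Date().toISOString(),
};
```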
@doc("The result of listing the Compute Nodes in a Pool.")
 @pagedResult
-model ComputeNodeListResult {
+model BatchNodeListResult {
   @doc("The list of Compute Nodes.")
   @items
-  value?: ComputeNode[];
+  value?: BatchNode[];
 
+  #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
   @doc("The URL to get the next set of results.")
   @nextLink
-  "odata.nextLink"?: string;
+  `odata.nextLink`?: string;
 }
 
+#suppress "@azure-tools/typespec-azure-core/casing-style"
 @doc("The configuration for virtual machine extension instance view.")
 model NodeVMExtension {
   @doc("The provisioning state of the virtual machine extension.")
   provisioningState?: string;
 
-  @doc("The configuration for virtual machine extensions.")
+  @doc("The virtual machine extension.")
   vmExtension?: VMExtension;
 
   @doc("The vm extension instance view.")
   instanceView?: VMExtensionInstanceView;
 }
 
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Property types must use camelCase"
 @doc("The vm extension instance view.")
 model VMExtensionInstanceView {
   @doc("The name of the vm extension instance view.")
@@ -5098,6 +3829,7 @@ model InstanceViewStatus {
   time?: string;
 }
 
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase"
 @doc("The result of listing the Compute Node extensions in a Node.")
 @pagedResult
 model NodeVMExtensionList {
@@ -5107,5 +3839,5 @@ model NodeVMExtensionList {
 
   @doc("The URL to get the next set of results.")
   @nextLink
-  "odata.nextLink"?: string;
-}
\ No newline at end of file
+  `odata.nextLink`?: string;
+}
diff --git a/packages/typespec-test/test/batch_modular/spec/routes.tsp b/packages/typespec-test/test/batch_modular/spec/routes.tsp
index 6e3a0133f9..48404fc0c5 100644
--- a/packages/typespec-test/test/batch_modular/spec/routes.tsp
+++ b/packages/typespec-test/test/batch_modular/spec/routes.tsp
@@ -1,26 +1,57 @@
 import "@typespec/rest";
 import "@typespec/versioning";
+import "@typespec/http";
 import "@azure-tools/typespec-azure-core";
-import "@azure-tools/typespec-autorest";
-import "@typespec/openapi";
-import "./models.tsp";
+import "@typespec/rest";
 import "@azure-tools/typespec-client-generator-core";
+import "./models.tsp";
 
-using TypeSpec.Reflection;
 using TypeSpec.Http;
 using TypeSpec.Rest;
-using Autorest;
 using TypeSpec.Versioning;
 using Azure.Core;
-using OpenAPI;
+using Azure.Core.Traits;
 using Azure.ClientGenerator.Core;
 
-namespace BatchService;
-
+namespace Azure.Batch;
+
+// Templates
+
+/**
+ * A remote procedure call (RPC) operation with additional response.
+ * @template TParams Object describing the parameters of the operation.
+ * @template TResponse Object describing the response of the operation.
+ * @template TAdditionalResponse Object describing the additional response of the operation.
+ * @template Traits Object describing the traits of the operation.
+ * @template TErrorResponse Error response of the operation. If not specified, the default error response is used.
+ */
+@doc("New RPC operation allowing multiple responses")
+@Foundations.Private.needsRoute
+op RpcOperationWithAdditionalResponse<
+  TParams,
+  TResponse extends TypeSpec.Reflection.Model,
+  TAdditionalResponse extends {},
+  Traits extends {} = {},
+  TErrorResponse = Azure.Core.Foundations.ErrorResponse
+> is Foundations.Operation<
+  TParams &
+    Azure.Core.Traits.Private.TraitProperties<
+      Traits,
+      TraitLocation.Parameters
+    >,
+  (TResponse &
+    Azure.Core.Traits.Private.TraitProperties<
+      Traits,
+      TraitLocation.Response
+    >) | TAdditionalResponse,
+  Traits,
+  TErrorResponse
+>;
 
 // Interfaces ////////////////////
-@operationGroup
-@tag("Applications")
+
+#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters."
+#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body."
 interface Applications {
   @summary("Lists all of the applications available in the specified Account.")
   @doc("""
This operation returns only Applications and versions that are available for
use on Compute Nodes; that is, that can be used in an Package reference. For
administrator information about applications and versions that are not yet
available to Compute Nodes, use the Azure portal or the Azure Resource Manager
API.
""")
-  @example("./examples/ApplicationList.json", "List applications")
+
   @route("/applications")
-  @operationId("Application_List")
   @get
-  ListApplications is Azure.Core.Foundations.Operation<
+  listApplications is RpcOperation<
     BatchApplicationListHeaders,
-    BatchResponseHeaders & ApplicationListResult
-  > ;
+    BatchResponseHeaders & ApplicationListResult,
+    {},
+    BatchError
+  >;
 
   @summary("Gets information about the specified Application.")
   @doc("""
This operation returns only Applications and versions that are available for
use on Compute Nodes; that is, that can be used in an Package reference. For
administrator information about Applications and versions that are not yet
available to Compute Nodes, use the Azure portal or the Azure Resource Manager
API.
""")
-  @example("./examples/ApplicationGet.json", "Get applications")
-  Get is Azure.Core.ResourceRead<
-    Application,
-    {
-      parameters: BatchClientRequestHeaders;
-      response: BatchError;
-    }
+  @route("/applications")
+  @get
+  getApplication is RpcOperation<
+    BatchClientRequestHeaders & {
+      @doc("The ID of the Application")
+      @path
+      applicationId: string;
+    },
+    BatchResponseHeaders & BatchApplication,
+    {},
+    BatchError
   >;
 }
 
-@operationGroup
-@tag("Pools")
-interface Pool {
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase"
+#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters."
+#suppress "@azure-tools/typespec-azure-core/use-standard-operations" "Operation 'Exists' should be defined using a signature from the Azure.Core namespace."
+#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body."
+interface Pools {
   @summary("""
Lists the usage metrics, aggregated by Pool across individual time intervals,
for the specified Account.
""")
   @doc("""
If you do not specify a $filter clause including a poolId, the response
includes all Pools that existed in the Account in the time range of the
returned aggregation intervals. If you do not specify either a startTime or
endTime these filters default to the start and end
times of the last aggregation interval currently available; that is, only the
last aggregation interval is returned. 
""") - @example("./examples/PoolListUsageMetrics.json", "Pool list usage metrics") - ListUsageMetrics is Azure.Core.ResourceList< - PoolUsageMetrics, - { - parameters: BatchApplicationListHeaders & Pool_ListUsageMetricRequestHeaders; - response: BatchResponseHeaders; - } - >; - - @summary("Gets lifetime summary statistics for all of the Pools in the specified Account.") - @doc(""" -Statistics are aggregated across all Pools that have ever existed in the -Account, from Account creation to the last update time of the statistics. The -statistics may not be immediately available. The Batch service performs -periodic roll-up of statistics. The typical delay is about 30 minutes. -""") - @route("/lifetimepoolstats") - @example( - "./examples/PoolGetLifetimeStatistics.json", - "Pool get lifetime statistics" - ) + @route("/poolusagemetrics") @get - GetAllPoolLifetimeStatistics is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders, - BatchResponseHeaders & PoolStatistics - >; + listPoolUsageMetrics is RpcOperation< + BatchApplicationListHeaders & { + @doc(""" + The earliest time from which to include metrics. This must be at least two and + a half hours before the current time. If not specified this defaults to the + start time of the last aggregation interval currently available. + """) + @query + starttime?: utcDateTime; + + @doc(""" + The latest time from which to include metrics. This must be at least two hours + before the current time. If not specified this defaults to the end time of the + last aggregation interval currently available. + """) + @query + endtime?: utcDateTime; + @doc(""" + An OData $filter clause. For more information on constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + """) + @query + $filter?: string; + }, + BatchResponseHeaders & PoolListUsageMetricsResult, + {}, + BatchError + >; - @summary("Adds a Pool to the specified Account.") + @summary("Creates a Pool to the specified Account.") @doc(""" When naming Pools, avoid including sensitive information such as user names or secret project names. This information may appear in telemetry logs accessible to Microsoft Support engineers. 
""") @route("/pools") - @example( - "./examples/PoolAdd_CloudServiceConfiguration.json", - "Add a CloudServiceConfiguration pool" - ) - @example( - "./examples/PoolAdd_VirtualMachineConfiguration.json", - "Add a VirtualMachineConfiguration pool" - ) - @example( - "./examples/PoolAdd_VirtualMachineConfigurationWithContainers.json", - "Add a VirtualMachineConfiguration pool with containers" - ) - @example( - "./examples/PoolAdd_VirtualMachineConfigurationWithExtensions.json", - "Add a VirtualMachineConfiguration pool with extensions" - ) - @example( - "./examples/PoolAdd_MountConfiguration.json", - "Add a pool with mount drive specified" - ) - @post - AddPool is Azure.Core.Foundations.Operation< + createPool is RpcOperation< BatchClientRequestHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + @body - @doc("The Pool to be added.") - pool: BatchPool; + @doc("The Pool to be created.") + body: BatchPoolCreateOptions; }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "201"; + @statusCode + code: "201"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; - -@summary("Lists all of the Pools in the specified Account.") + @summary("Lists all of the Pools in the specified Account.") @doc("Lists all of the Pools in the specified Account.") @route("/pools") - @example( - "./examples/PoolList_Basic.json", - "Pool list" - ) @get - ListPools is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders &{ + listPools is RpcOperation< + BatchApplicationListHeaders & { @doc(""" An OData $filter clause. For more information on constructing this filter, see https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. @@ -163,19 +186,19 @@ https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#li $filter?: string; @doc("An OData $select clause.") - @query - $select?: string; + @query({ format: "csv" }) + $select?: string[]; @doc("An OData $expand clause.") - @query - $expand?: string; + @query({ format: "csv" }) + $expand?: string[]; }, - BatchResponseHeaders & BatchPoolListResult + BatchResponseHeaders & BatchPoolListResult, + {}, + BatchError >; - - -@summary("Deletes a Pool from the specified Account.") + @summary("Deletes a Pool from the specified Account.") @doc(""" When you request that a Pool be deleted, the following actions occur: the Pool state is set to deleting; any ongoing resize operation on the Pool are stopped; @@ -190,95 +213,86 @@ zero size before deleting the Pool. If you call an Update, Patch or Delete API on a Pool in the deleting state, it will fail with HTTP status code 409 with error code PoolBeingDeleted. 
""") - @example( - "./examples/PoolDelete.json", - "Pool delete" - ) @route("/pools/{poolId}") @delete - DeletePool is Azure.Core.Foundations.Operation< + deletePool is RpcOperation< BatchClientRequestHeaders & BatchPoolHeaders, - DeleteResponseHeaders + DeleteResponseHeaders, + {}, + BatchError >; - - @doc("Gets basic properties of a Pool.") - @example( - "./examples/PoolExists.json", - "Check Pool Exists" - ) + @doc("Gets basic properties of a Pool.") @route("/pools/{poolId}") @head - Exists is Azure.Core.Foundations.Operation< + poolExists is RpcOperationWithAdditionalResponse< BatchClientRequestHeaders & BatchPoolHeaders, - (PoolDoesntExistResponseHeaders | PoolDoesExistResponseHeaders) + PoolDoesntExistResponseHeaders, + PoolDoesExistResponseHeaders, + {}, + BatchError >; - - - @doc("Gets information about the specified Pool.") - @example( - "./examples/PoolGet_Basic.json", - "Pool get" - ) - @example( - "./examples/PoolGet_VirtualMachineConfigurationWithExtensions.json", - "Get a VirtualMachineConfiguration pool with extensions" - ) + @doc("Gets information about the specified Pool.") @route("/pools/{poolId}") @get - GetPool is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchPoolHeaders & { - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, - BatchResponseHeaders & BatchPool - > ; - + getPool is RpcOperation< + BatchClientRequestHeaders & + BatchPoolHeaders & { + @doc("An OData $select clause.") + @query({ + format: "csv", + }) + $select?: string[]; + + @doc("An OData $expand clause.") + @query({ + format: "csv", + }) + $expand?: string[]; + }, + BatchResponseHeaders & BatchPool, + {}, + BatchError + >; - @summary("Updates the properties of the specified Pool.") + @summary("Updates the properties of the specified Pool.") @doc(""" This only replaces the Pool properties specified in the request. For example, if the Pool has a StartTask associated with it, and a request does not specify a StartTask element, then the Pool keeps the existing StartTask. """) - @example( - "./examples/PoolPatch.json", - "Patch the Pool" - ) @route("/pools/{poolId}") @patch - PatchPool is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchPoolHeaders &{ - @doc("The parameters for the request.") - @body - poolUpdate: BatchPool; - }, + updatePool is RpcOperation< + BatchClientRequestHeaders & + BatchPoolHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The pool properties to update.") + @body + body: BatchPoolUpdateOptions; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; - + @statusCode + code: "200"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; -@summary("Disables automatic scaling for a Pool.") + @summary("Disables automatic scaling for a Pool.") @doc("Disables automatic scaling for a Pool.") - @example( - "./examples/PoolDisableAutoScale.json", - "Disable pool autoscale" - ) @route("/pools/{poolId}/disableautoscale") @post - DisableAutoScale is Azure.Core.Foundations.Operation< + disablePoolAutoScale is RpcOperation< BatchClientRequestHeaders & { @doc("The ID of the Pool on which to disable automatic scaling.") @path @@ -286,16 +300,18 @@ a StartTask element, then the Pool keeps the existing StartTask. 
}, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; - + @statusCode + code: "200"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; - - @summary("Enables automatic scaling for a Pool.") + @summary("Enables automatic scaling for a Pool.") @doc(""" You cannot enable automatic scaling on a Pool if a resize operation is in progress on the Pool. If automatic scaling of the Pool is currently disabled, @@ -304,65 +320,69 @@ scaling of the Pool is already enabled, you may specify a new autoscale formula and/or a new evaluation interval. You cannot call this API for the same Pool more than once every 30 seconds. """) - @example( - "./examples/PoolEnableAutoscale.json", - "Pool enable autoscale" - ) @route("/pools/{poolId}/enableautoscale") @post - EnableAutoScale is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchPoolHeaders & { - @doc("The parameters for the request.") - @body - parameters: BatchPoolEnableAutoScaleParameters; - }, + enablePoolAutoScale is RpcOperation< + BatchClientRequestHeaders & + BatchPoolHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The options to use for enabling automatic scaling.") + @body + body: BatchPoolEnableAutoScaleOptions; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; - + @statusCode + code: "200"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; - - @summary("Gets the result of evaluating an automatic scaling formula on the Pool.") + @summary("Gets the result of evaluating an automatic scaling formula on the Pool.") @doc(""" This API is primarily for validating an autoscale formula, as it simply returns the result without applying the formula to the Pool. The Pool must have auto scaling enabled in order to evaluate a formula. """) - @example( - "./examples/PoolEvaluateAutoscale.json", - "Pool evaluate autoscale" - ) @route("/pools/{poolId}/evaluateautoscale") @post - EvaluateAutoScale is Azure.Core.Foundations.Operation< + evaluatePoolAutoScale is RpcOperation< BatchClientRequestHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + @doc("The ID of the Pool on which to evaluate the automatic scaling formula.") @path poolId: string; - @doc("The parameters for the request.") + @doc("The options to use for evaluating the automatic scaling formula.") @body - parameters: BatchPoolEvaluateAutoScaleParameters; + body: BatchPoolEvaluateAutoScaleOptions; }, - BatchResponseHeaders & AutoScaleRun & { - @doc("A process exit code.") - @statusCode code: "200"; - - @doc("The OData ID of the resource to which the request applied") - @header("DataServiceId") - "DataServiceId": string; - } - >; - - + BatchResponseHeaders & + AutoScaleRun & { + @doc("A process exit code.") + @statusCode + code: "200"; + @doc("The OData ID of the resource to which the request applied") + @header("DataServiceId") + DataServiceId: string; + }, + {}, + BatchError + >; - @summary("Changes the number of Compute Nodes that are assigned to a Pool.") + @summary("Changes the number of Compute Nodes that are assigned to a Pool.") @doc(""" You can only resize a Pool when its allocation state is steady. 
-  @summary("Changes the number of Compute Nodes that are assigned to a Pool.")
+  @summary("Changes the number of Compute Nodes that are assigned to a Pool.")
   @doc("""
 You can only resize a Pool when its allocation state is steady.
 If the Pool is already resizing, the request fails with status code 409. When you resize a
@@ -372,27 +392,30 @@ the Batch service returns an error 409. If you resize a Pool downwards, the
 Batch service chooses which Compute Nodes to remove. To remove specific
 Compute Nodes, use the Pool remove Compute Nodes API instead.
 """)
-  @example(
-    "./examples/PoolResize.json",
-    "Pool resize"
-  )
   @route("/pools/{poolId}/resize")
   @post
-  Resize is Azure.Core.Foundations.Operation<
-    BatchClientRequestHeaders & BatchPoolHeaders & {
-
-      @doc("The parameters for the request.")
-      @body
-      parameters: BatchPoolResizeParameters;
-    },
+  resizePool is RpcOperation<
+    BatchClientRequestHeaders &
+      BatchPoolHeaders & {
+        @doc("Type of content")
+        @header("content-type")
+        contentType: "application/json; odata=minimalmetadata";
+
+        @doc("The options to use for resizing the pool.")
+        @body
+        body: BatchPoolResizeOptions;
+      },
     BatchResponseHeaders & {
       @doc("A process exit code.")
-      @statusCode code: "202";
-
+      @statusCode
+      code: "202";
+
       @doc("The OData ID of the resource to which the request applied")
      @header("DataServiceId")
-      "DataServiceId": string;
-    }
+      DataServiceId: string;
+    },
+    {},
+    BatchError
   >;

   @summary("Stops an ongoing resize operation on the Pool.")
@@ -405,15 +428,13 @@ the Pool allocation state changes first to stopping and then to steady. A
 resize operation need not be an explicit resize Pool request; this API can
 also be used to halt the initial sizing of the Pool when it is created.
 """)
-  @example(
-    "./examples/PoolStopResize.json",
-    "Pool stop resize"
-  )
   @route("/pools/{poolId}/stopresize")
   @post
-  StopResize is Azure.Core.Foundations.Operation<
+  stopPoolResize is RpcOperation<
     BatchClientRequestHeaders & BatchPoolHeaders,
-    PoolStopResizeResponseHeaders
+    PoolStopResizeResponseHeaders,
+    {},
+    BatchError
   >;

   @summary("Updates the properties of the specified Pool.")
   @doc("""
 This fully replaces all the updatable properties of the Pool. For example, if
 the Pool has a StartTask associated with it and if StartTask is not specified
 with this request, then the Batch service will remove the existing StartTask.
 """)
-  @example(
-    "./examples/PoolUpdate.json",
-    "Pool update"
-  )
   @route("/pools/{poolId}/updateproperties")
   @post
-  UpdateProperties is Azure.Core.Foundations.Operation<
+  replacePoolProperties is RpcOperation<
     BatchClientRequestHeaders & {
+      @doc("Type of content")
+      @header("content-type")
+      contentType: "application/json; odata=minimalmetadata";
+
       @doc("The ID of the Pool to update.")
       @path
       poolId: string;
-
-      @doc("The parameters for the request.")
+      @doc("The options to use for replacing properties on the pool.")
       @body
-      poolUpdatePropertiesParameter: BatchPool;
+      body: BatchPoolReplaceOptions;
     },
-    PoolUpdatePropertiesResponseHeaders
+    PoolUpdatePropertiesResponseHeaders,
+    {},
+    BatchError
   >;
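Because a resize is only accepted while the pool's allocation state is steady, callers typically need to handle the documented 409. A sketch under those assumptions (the `targetDedicatedNodes` field is assumed from `BatchPoolResizeOptions`; names are illustrative):

import { getClient } from "@azure-rest/core-client";
import { DefaultAzureCredential } from "@azure/identity";

const client = getClient("https://contoso.eastus.batch.azure.com", new DefaultAzureCredential(), {
  credentials: { scopes: ["https://batch.core.windows.net/.default"] },
});

const resize = await client.path("/pools/{poolId}/resize", "pool-1").post({
  contentType: "application/json; odata=minimalmetadata",
  body: { targetDedicatedNodes: 5 },
});
if (resize.status === "409") {
  // Allocation state was not steady (a resize is already in progress).
  // Either wait for it to settle, or stop it first via /pools/{poolId}/stopresize.
  await client.path("/pools/{poolId}/stopresize", "pool-1").post();
}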
""") - @example( - "./examples/PoolRemoveNodes.json", - "Pool remove nodes" - ) @route("/pools/{poolId}/removenodes") @post - RemoveNodes is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchPoolHeaders & { - - @doc("The parameters for the request.") - @body - parameters: NodeRemoveParameters; - }, - PoolRemoveNodesResponseHeaders + removeNodes is RpcOperation< + BatchClientRequestHeaders & + BatchPoolHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The options to use for removing the node.") + @body + body: NodeRemoveOptions; + }, + PoolRemoveNodesResponseHeaders, + {}, + BatchError >; - } -@operationGroup -@tag("Accounts") -interface Account { +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" +#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters." +#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body." +interface Accounts { @summary("Lists all Virtual Machine Images supported by the Azure Batch service.") @doc("Lists all Virtual Machine Images supported by the Azure Batch service.") - @example("./examples/AccountListSupportedImages.json", "Account list node agent skus") @route("/supportedimages") @get - ListSupportedImages is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" + listSupportedImages( + ...BatchApplicationListHeaders, + + @doc(""" An OData $filter clause. For more information on constructing this filter, see https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. """) - @query - $filter?: string; - }, - BatchResponseHeaders & AccountListSupportedImagesResult - >; - + @query + $filter?: string + ): (BatchResponseHeaders & AccountListSupportedImagesResult) | BatchError; - @doc(""" + @doc(""" Gets the number of Compute Nodes in each state, grouped by Pool. Note that the numbers returned may not always be up to date. If you need exact node counts, use a list query. """) - @example("./examples/AccountListPoolNodeCounts.json", "NodeCountsPayload") @route("/nodecounts") @get - ListPoolNodeCounts is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc(""" + listPoolNodeCounts is RpcOperation< + BatchApplicationListHeaders & { + @doc(""" An OData $filter clause. For more information on constructing this filter, see https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. """) - @query - $filter?: string; - }, - BatchClientResponseHeaders & PoolNodeCountsListResult + @query + $filter?: string; + }, + BatchClientResponseHeaders & PoolNodeCountsListResult, + {}, + BatchError >; - } -@operationGroup -@tag("Jobs") -interface Job { - @summary("Gets lifetime summary statistics for all of the Jobs in the specified Account.") - @doc(""" -Statistics are aggregated across all Jobs that have ever existed in the -Account, from Account creation to the last update time of the statistics. The -statistics may not be immediately available. The Batch service performs -periodic roll-up of statistics. The typical delay is about 30 minutes. 
-""") - @example("./examples/JobGetLifetimeStatistics.json", "Job get lifetime statistics") - @route("/lifetimejobstats") - @get - GetAllJobLifetimeStatistics is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders, - BatchResponseHeaders & JobStatistics - >; - - - @summary("Deletes a Job.") +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" +#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters." +#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body." +interface Jobs { + @summary("Deletes a Job.") @doc(""" Deleting a Job also deletes all Tasks that are part of that Job, and all Job statistics. This also overrides the retention period for Task data; that is, if @@ -540,105 +545,121 @@ deleting state. All update operations on a Job that is in deleting state will fail with status code 409 (Conflict), with additional information indicating that the Job is being deleted. """) - @example("./examples/JobDelete.json", "Delete Job") @route("/jobs/{jobId}") @delete - DeleteJob is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job to delete.") - @path - jobId: string; - }, - DeleteResponseHeaders + deleteJob is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("The ID of the Job to delete.") + @path + jobId: string; + }, + DeleteResponseHeaders, + {}, + BatchError >; - -@summary("Gets information about the specified Job.") + @summary("Gets information about the specified Job.") @doc("Gets information about the specified Job.") - @example("./examples/JobGet.json", "Job get") @route("/jobs/{jobId}") @get - GetJob is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job.") - @path - jobId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - - - }, - BatchResponseHeaders & BatchJob + getJob is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("The ID of the Job.") + @path + jobId: string; + + @doc("An OData $select clause.") + @query({ + format: "csv", + }) + $select?: string[]; + + @doc("An OData $expand clause.") + @query({ + format: "csv", + }) + $expand?: string[]; + }, + BatchResponseHeaders & BatchJob, + {}, + BatchError >; - - @summary("Updates the properties of the specified Job.") + @summary("Updates the properties of the specified Job.") @doc(""" This replaces only the Job properties specified in the request. For example, if the Job has constraints, and a request does not specify the constraints element, then the Job keeps the existing constraints. 
""") - @example("./examples/JobPatch.json", "Job patch") @route("/jobs/{jobId}") @patch - PatchJob is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job whose properties you want to update.") - @path - jobId: string; + updateJob is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The ID of the Job whose properties you want to update.") + @path + jobId: string; - @doc("The parameters for the request.") - @body - jobUpdate: BatchJob; - }, + @doc("The options to use for updating the Job.") + @body + body: BatchJobUpdateOptions; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; - + @statusCode + code: "200"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; - @summary("Updates the properties of the specified Job.") @doc(""" This fully replaces all the updatable properties of the Job. For example, if the Job has constraints associated with it and if constraints is not specified with this request, then the Batch service will remove the existing constraints. """) - @example("./examples/JobUpdate.json", "Job update") @route("/jobs/{jobId}") @put - UpdateJob is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job whose properties you want to update.") - @path - jobId: string; + replaceJob is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The ID of the Job whose properties you want to update.") + @path + jobId: string; - @doc("The parameters for the request.") - @body - job: BatchJob; - }, + @doc("A job with updated properties") + @body + body: BatchJob; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; - + @statusCode + code: "200"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; - - @summary("Disables the specified Job, preventing new Tasks from running.") + @summary("Disables the specified Job, preventing new Tasks from running.") @doc(""" The Batch Service immediately moves the Job to the disabling state. Batch then uses the disableTasks parameter to determine what to do with the currently @@ -649,27 +670,34 @@ are started under the Job until it moves back to active state. If you try to disable a Job that is in any state other than active, disabling, or disabled, the request fails with status code 409. 
""") - @example("./examples/JobDisable.json", "Job disable") @route("/jobs/{jobId}/disable") @post - DisableJob is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders &{ - @doc("The ID of the Job to disable.") - @path - jobId: string; + disableJob is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The ID of the Job to disable.") + @path + jobId: string; - @doc("The parameters for the request.") - @body - parameters: BatchJobDisableParameters; - }, + @doc("The options to use for disabling the Job.") + @body + body: BatchJobDisableOptions; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "202"; - + @statusCode + code: "202"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; @summary("Enables the specified Job, allowing new Tasks to run.") @@ -681,27 +709,29 @@ does not allow a Task to remain in the active state for more than 180 days. Therefore, if you enable a Job containing active Tasks which were added more than 180 days ago, those Tasks will not run. """) - @example("./examples/JobEnable.json", "Job enable") @route("/jobs/{jobId}/enable") @post - EnableJob is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job to enable.") - @path - jobId: string; - }, + enableJob is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("The ID of the Job to enable.") + @path + jobId: string; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "202"; - + @statusCode + code: "202"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; - -@summary("Terminates the specified Job, marking it as completed.") + @summary("Terminates the specified Job, marking it as completed.") @doc(""" When a Terminate Job request is received, the Batch service sets the Job to the terminating state. The Batch service then terminates any running Tasks @@ -710,30 +740,37 @@ moves into the completed state. If there are any Tasks in the Job in the active state, they will remain in the active state. Once a Job is terminated, new Tasks cannot be added and any remaining active Tasks will not be scheduled. 
""") - @example("./examples/JobTerminate.json", "Job terminate") @route("/jobs/{jobId}/terminate") @post - TerminateJob is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job to terminate.") - @path - jobId: string; + terminateJob is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The ID of the Job to terminate.") + @path + jobId: string; - @doc("The parameters for the request.") - @body - parameters?: BatchJobTerminateParameters; - }, - BatchResponseHeaders & { + @doc("The options to use for terminating the Job.") + @body + body?: BatchJobTerminateOptions; + }, + BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "202"; - + @statusCode + code: "202"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; - @summary("Adds a Job to the specified Account.") + @summary("Creates a Job to the specified Account.") @doc(""" The Batch service supports two ways to control the work done as part of a Job. In the first approach, the user specifies a Job Manager Task. The Batch service @@ -745,32 +782,36 @@ including sensitive information such as user names or secret project names. This information may appear in telemetry logs accessible to Microsoft Support engineers. """) - @example("./examples/JobAdd_Basic.json", "Add a basic job") - @example("./examples/JobAdd_Complex.json", "Add a complex job") @route("/jobs") @post - AddJob is Azure.Core.Foundations.Operation< + createJob is RpcOperation< BatchClientRequestHeaders & { - @doc("The Job to be added.") + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The Job to be created.") @body - job: BatchJob; + body: BatchJobCreateOptions; }, - BatchResponseHeaders & { + BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "201"; - + @statusCode + code: "201"; + @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; - } + DataServiceId: string; + }, + {}, + BatchError >; @summary("Lists all of the Jobs in the specified Account.") @doc("Lists all of the Jobs in the specified Account.") - @example("./examples/JobList.json", "Job list") @route("/jobs") @get - ListJobs is Azure.Core.Foundations.Operation< + listJobs is RpcOperation< BatchApplicationListHeaders & { @doc(""" An OData $filter clause. 
   @summary("Lists all of the Jobs in the specified Account.")
   @doc("Lists all of the Jobs in the specified Account.")
-  @example("./examples/JobList.json", "Job list")
   @route("/jobs")
   @get
-  ListJobs is Azure.Core.Foundations.Operation<
+  listJobs is RpcOperation<
     BatchApplicationListHeaders & {
       @doc("""
 An OData $filter clause.
 For more information on constructing this filter, see
@@ -780,23 +821,23 @@ https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#li
 """)
       @query
       $filter?: string;

       @doc("An OData $select clause.")
-      @query
-      $select?: string;
+      @query({ format: "csv" })
+      $select?: string[];

       @doc("An OData $expand clause.")
-      @query
-      $expand?: string;
-
+      @query({ format: "csv" })
+      $expand?: string[];
     },
-    BatchResponseHeaders & BatchJobListResult
+    BatchResponseHeaders & BatchJobListResult,
+    {},
+    BatchError
   >;

   @summary("Lists the Jobs that have been created under the specified Job Schedule.")
   @doc("Lists the Jobs that have been created under the specified Job Schedule.")
-  @example("./examples/JobListFromJobSchedule.json", "List Job Under Job Schedule")
   @route("/jobschedules/{jobScheduleId}/jobs")
   @get
-  ListFromJobSchedule is Azure.Core.Foundations.Operation<
+  listJobsFromSchedule is RpcOperation<
     BatchApplicationListHeaders & {
       @doc("The ID of the Job Schedule from which you want to get a list of Jobs.")
       @path
       jobScheduleId: string;

       @doc("""
 An OData $filter clause.
 For more information on constructing this filter, see
@@ -810,14 +851,16 @@ https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#li
       $filter?: string;

       @doc("An OData $select clause.")
-      @query
-      $select?: string;
+      @query({ format: "csv" })
+      $select?: string[];

       @doc("An OData $expand clause.")
-      @query
-      $expand?: string;
+      @query({ format: "csv" })
+      $expand?: string[];
     },
-    BatchResponseHeaders & BatchJobListResult
+    BatchResponseHeaders & BatchJobListResult,
+    {},
+    BatchError
   >;

   @summary("""
 Lists the execution status of the Job Preparation and Job Release Task for the
 specified Job across the Compute Nodes where the Job has run.
 """)
   @doc("""
 This API returns the Job Preparation and Job Release Task status on all Compute
 Nodes that have run the Job Preparation or Job Release Task. This includes
 Compute Nodes which have since been removed from the Pool. If this API is
 invoked on a Job which has no Job Preparation or Job Release Task, the Batch
 service returns HTTP status code 409 (Conflict) with an error code of
 JobPreparationTaskNotSpecified.
 """)
-  @example("./examples/JobListPreparationAndReleaseTaskStatus.json", "Job list preparation and release task status")
   @route("/jobs/{jobId}/jobpreparationandreleasetaskstatus")
   @get
-  ListPreparationAndReleaseTaskStatus is Azure.Core.Foundations.Operation<
-    BatchApplicationListHeaders & {
-      @doc("The ID of the Job.")
-      @path
-      jobId: string;
+  listJobPreparationAndReleaseTaskStatus(
+    ...BatchApplicationListHeaders,

-      @doc("""
+    @doc("The ID of the Job.")
+    @path
+    jobId: string,
+
+    @doc("""
 An OData $filter clause. For more information on constructing this filter, see
 https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status.
 """)
-      @query
-      $filter?: string;
+    @query
+    $filter?: string,

-      @doc("An OData $select clause.")
-      @query
-      $select?: string;
-    },
-    BatchResponseHeaders & BatchJobListPreparationAndReleaseTaskStatusResult
-  >;
+    @doc("An OData $select clause.")
+    @query({
+      format: "csv",
+    })
+    $select?: string[]
+  ): (BatchResponseHeaders &
+    BatchJobListPreparationAndReleaseTaskStatusResult) | BatchError;

   @summary("Gets the Task counts for the specified Job.")
   @doc("""
 Task counts provide a count of the Tasks by active, running or completed Task
 state, and a count of Tasks which succeeded or failed. Tasks in the preparing
 state are counted as running. Note that the numbers returned may not always be
 up to date. If you need exact task counts, use a list query.
""") - @example("./examples/JobGetTaskCounts.json", "Job get task counts") @route("/jobs/{jobId}/taskcounts") @get - GetTaskCounts is Azure.Core.Foundations.Operation< + getJobTaskCounts is RpcOperation< BatchClientRequestHeaders & { @doc("The ID of the Job.") @path jobId: string; }, - BatchClientResponseHeaders & TaskCountsResult + BatchClientResponseHeaders & TaskCountsResult, + {}, + BatchError >; } -@operationGroup -@tag("Certificates") +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" +#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters." +#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body." interface Certificates { - @summary("Adds a Certificate to the specified Account.") - @doc("Adds a Certificate to the specified Account.") - @example("./examples/CertificateAdd.json", "Certificate add") + #deprecated "Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead." + @summary("Creates a Certificate to the specified Account.") + @doc("Creates a Certificate to the specified Account.") @route("/certificates") @post - AddCertificate is Azure.Core.Foundations.Operation< + createCertificate is RpcOperation< BatchClientRequestHeaders & { - - @doc("The Certificate to be added.") + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The Certificate to be created.") @body - certificate: Certificate; + body: BatchCertificate; }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "201"; - } + @statusCode + code: "201"; + + @header("DataServiceId") + @doc("The OData ID of the resource to which the request applied.") + DataServiceId: string; + }, + {}, + BatchError >; + #deprecated "Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead." @summary("Lists all of the Certificates that have been added to the specified Account.") @doc("Lists all of the Certificates that have been added to the specified Account.") - @example("./examples/CertificateList.json", "Certificate list") @route("/certificates") @get - ListCertificates is Azure.Core.Foundations.Operation< + listCertificates is RpcOperation< BatchApplicationListHeaders & { @doc(""" An OData $filter clause. For more information on constructing this filter, see @@ -911,12 +967,15 @@ https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#li $filter?: string; @doc("An OData $select clause.") - @query - $select?: string; + @query({ format: "csv" }) + $select?: string[]; }, - BatchResponseHeaders & CertificateListResult + BatchResponseHeaders & CertificateListResult, + {}, + BatchError >; + #deprecated "Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead." 
@summary("Cancels a failed deletion of a Certificate from the specified Account.") @doc(""" If you try to delete a Certificate that is being used by a Pool or Compute @@ -927,10 +986,9 @@ Certificate, you do not need to run this operation after the deletion failed. You must make sure that the Certificate is not being used by any resources, and then you can try again to delete the Certificate. """) - @example("./examples/CertificateCancelDelete.json", "Certificate cancel delete") @route("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete") @post - CancelCertificateDeletion is Azure.Core.Foundations.Operation< + cancelCertificateDeletion is RpcOperation< BatchClientRequestHeaders & { @doc("The algorithm used to derive the thumbprint parameter. This must be sha1.") @path @@ -939,19 +997,21 @@ then you can try again to delete the Certificate. @doc("The thumbprint of the Certificate being deleted.") @path thumbprint: string; - - }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "204"; + @statusCode + code: "204"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; + #deprecated "Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead." @summary("Deletes a Certificate from the specified Account.") @doc(""" You cannot delete a Certificate if a resource (Pool or Compute Node) is using @@ -964,10 +1024,9 @@ that is in use, the deletion fails. The Certificate status changes to deleteFailed. You can use Cancel Delete Certificate to set the status back to active if you decide that you want to continue using the Certificate. """) - @example("./examples/CertificateDelete.json", "Certificate delete") @route("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})") @delete - DeleteCertificate is Azure.Core.Foundations.Operation< + deleteCertificate is RpcOperation< BatchClientRequestHeaders & { @doc("The algorithm used to derive the thumbprint parameter. This must be sha1.") @path @@ -979,15 +1038,18 @@ active if you decide that you want to continue using the Certificate. }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "202"; - } + @statusCode + code: "202"; + }, + {}, + BatchError >; + #deprecated "Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead." @doc("Gets information about the specified Certificate.") - @example("./examples/CertificateGet.json", "Certificate get") @route("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})") @get - GetCertificate is Azure.Core.Foundations.Operation< + getCertificate is RpcOperation< BatchClientRequestHeaders & { @doc("The algorithm used to derive the thumbprint parameter. This must be sha1.") @path @@ -998,210 +1060,45 @@ active if you decide that you want to continue using the Certificate. 
       thumbprint: string;

       @doc("An OData $select clause.")
-      @query
-      $select?: string;
+      @query({
+        format: "csv",
+      })
+      $select?: string[];
     },
-    BatchResponseHeaders & Certificate
+    BatchResponseHeaders & BatchCertificate,
+    {},
+    BatchError
   >;
 }

-@operationGroup
-@tag("Files")
-interface File {
-  @summary("Deletes the specified Task file from the Compute Node where the Task ran.")
-  @doc("Deletes the specified Task file from the Compute Node where the Task ran.")
-  @example("./examples/FileDeleteFromTask.json", "File delete from task")
-  @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}")
-  @delete
-  DeleteFromTask is Azure.Core.Foundations.Operation<
-    BatchJobFileClientRequestHeaders & {
-      @doc("""
-Whether to delete children of a directory. If the filePath parameter represents
-a directory instead of a file, you can set recursive to true to delete the
-directory and all of the files and subdirectories in it. If recursive is false
-then the directory must be empty or deletion will fail.
-""")
-      @query
-      recursive?: boolean;
-    },
-    BatchClientResponseHeaders & {
-      @doc("A process exit code.")
-      @statusCode code: "200";
-    }
-  >;
-
-  @doc("Returns the content of the specified Task file.")
-  @example("./examples/FileGetFromTask.json", "Get File From Task")
-  @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}")
-  @get
-  GetFromTask is Azure.Core.Foundations.Operation<
-    BatchJobFileClientRequestHeaders & BatchModifiedSinceHeaders & {
-
-      @doc("""
-The byte range to be retrieved. The default is to retrieve the entire file. The
-format is bytes=startRange-endRange.
-""")
-      @header
-      "ocp-range"?: string;
-    },
-    BatchResponseHeaders & FileResponse & {
-      @header("content-type") contentType: "application/octet-stream";
-
-      @body
-      @doc("A response containing the file content.")
-      file: bytes;
-    }
-  >;
-
-  @doc("Gets the properties of the specified Task file.")
-  @example("./examples/FileGetPropertiesFromTask.json", "File get properties from task")
-  @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}")
-  @head
-  GetPropertiesFromTask is Azure.Core.Foundations.Operation<
-    BatchJobFileClientRequestHeaders & BatchModifiedSinceHeaders,
-    BatchResponseHeaders & FileResponse
-  >;
-
-  @summary("Deletes the specified file from the Compute Node.")
-  @doc("Deletes the specified file from the Compute Node.")
-  @example("./examples/FileDeleteFromNode.json", "File delete from node")
-  @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}")
-  @delete
-  DeleteFromComputeNode is Azure.Core.Foundations.Operation<
-    BatchPoolFileClientRequestHeaders & {
-
-      @doc("""
-Whether to delete children of a directory. If the filePath parameter represents
-a directory instead of a file, you can set recursive to true to delete the
-directory and all of the files and subdirectories in it. If recursive is false
-then the directory must be empty or deletion will fail.
-""")
-      @query
-      recursive?: boolean;
-    },
-    BatchClientResponseHeaders & {
-      @doc("A process exit code.")
-      @statusCode code: "200";
-    }
-  >;
-
-  @doc("Returns the content of the specified Compute Node file.")
-  @example("./examples/FileGetFromNode.json", "Get File From Compute Node")
-  @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}")
-  @get
-  GetFromComputeNode is Azure.Core.Foundations.Operation<
-    BatchPoolFileClientRequestHeaders & BatchModifiedSinceHeaders & {
-
-      @doc("""
-The byte range to be retrieved. The default is to retrieve the entire file. The
-format is bytes=startRange-endRange.
-""") - @header - "ocp-range"?: string; - }, - BatchResponseHeaders & FileResponse & { - @header("content-type") contentType: "application/octet-stream"; - - @body - @doc("A response containing the file content.") - file: bytes; - } - >; - - @doc("Gets the properties of the specified Compute Node file.") - @example("./examples/FileGetPropertiesFromNode.json", "File get properties from node") - @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}") - @head - GetPropertiesFromComputeNode is Azure.Core.Foundations.Operation< - BatchPoolFileClientRequestHeaders & BatchModifiedSinceHeaders, - BatchResponseHeaders & FileResponse - >; - - @summary("Lists the files in a Task's directory on its Compute Node.") - @doc("Lists the files in a Task's directory on its Compute Node.") - @example("./examples/FileListFromTask.json", "File list from task") - @route("/jobs/{jobId}/tasks/{taskId}/files") - @get - ListFromTask is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Job that contains the Task.") - @path - jobId: string; - - @doc("The ID of the Task whose files you want to list.") - @path - taskId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. -""") - @query - $filter?: string; - - @doc(""" -Whether to list children of the Task directory. This parameter can be used in -combination with the filter parameter to list specific type of files. -""") - @query - recursive?: boolean; - }, - BatchResponseHeaders & NodeFileListResult - >; - - @summary("Lists all of the files in Task directories on the specified Compute Node.") - @doc("Lists all of the files in Task directories on the specified Compute Node.") - @example("./examples/FileListFromNode.json", "File list from node") - @route("/pools/{poolId}/nodes/{nodeId}/files") - @get - ListFromComputeNode is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; - - @doc("The ID of the Compute Node whose files you want to list.") - @path - nodeId: string; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. -""") - @query - $filter?: string; - - @doc("Whether to list children of a directory.") - @query - recursive?: boolean; - }, - BatchResponseHeaders & NodeFileListResult - >; -} - -@operationGroup -@tag("JobSchedules") -interface JobSchedule { +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" +#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters." +#suppress "@azure-tools/typespec-azure-core/use-standard-operations" "Operation 'Exists' should be defined using a signature from the Azure.Core namespace." +#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body." 
+interface JobSchedules {
   @summary("Checks the specified Job Schedule exists.")
   @doc("Checks the specified Job Schedule exists.")
-  @example("./examples/JobScheduleExists.json", "Check Job Schedule Exists")
   @route("/jobschedules/{jobScheduleId}")
   @head
-  JobScheduleExists is Azure.Core.Foundations.Operation<
-    BatchClientRequestHeaders & BatchMatchHeaders & {
-      @doc("The ID of the Job Schedule which you want to check.")
-      @path
-      jobScheduleId: string;
-    },
+  jobScheduleExists is RpcOperationWithAdditionalResponse<
+    BatchClientRequestHeaders &
+      BatchMatchHeaders & {
+        @doc("The ID of the Job Schedule which you want to check.")
+        @path
+        jobScheduleId: string;
+      },
     BatchResponseHeaders & {
       @doc("A response containing headers related to the Job Schedule, if it exists.")
-      @statusCode code: "200";
-    } |
-    {
+      @statusCode
+      code: "200";
+    },
+    {
       @doc("The Job Schedule does not exist.")
-      @statusCode code: "404";
-    }
+      @statusCode
+      code: "404";
+    },
+    {},
+    BatchError
   >;

   @summary("Deletes a Job Schedule from the specified Account.")
   @doc("""
 When Jobs created by a Job Schedule are deleted, all the Tasks in those Jobs
 are also deleted. This means that any files created by those Tasks on
@@ -1212,37 +1109,45 @@ the Compute Nodes are also deleted (the retention period is ignored). The Job
 Schedule statistics are no longer accessible once the Job Schedule is deleted,
 though they are still counted towards Account lifetime statistics.
 """)
-  @example("./examples/JobScheduleDelete.json", "JobSchedule delete")
   @route("/jobschedules/{jobScheduleId}")
   @delete
-  DeleteJobSchedule is Azure.Core.Foundations.Operation<
-    BatchClientRequestHeaders & BatchMatchHeaders & {
-      @doc("The ID of the Job Schedule to delete.")
-      @path
-      jobScheduleId: string;
-    },
-    DeleteResponseHeaders
+  deleteJobSchedule is RpcOperation<
+    BatchClientRequestHeaders &
+      BatchMatchHeaders & {
+        @doc("The ID of the Job Schedule to delete.")
+        @path
+        jobScheduleId: string;
+      },
    DeleteResponseHeaders,
+    {},
+    BatchError
   >;

   @doc("Gets information about the specified Job Schedule.")
-  @example("./examples/JobScheduleGet.json", "JobSchedule get")
   @route("/jobschedules/{jobScheduleId}")
   @get
-  GetJobSchedule is Azure.Core.Foundations.Operation<
-    BatchClientRequestHeaders & BatchMatchHeaders & {
-      @doc("The ID of the Job Schedule to get.")
-      @path
-      jobScheduleId: string;
-
-      @doc("An OData $select clause.")
-      @query
-      $select?: string;
-
-      @doc("An OData $expand clause.")
-      @query
-      $expand?: string;
-    },
-    BatchResponseHeaders & BatchJobSchedule
+  getJobSchedule is RpcOperation<
+    BatchClientRequestHeaders &
+      BatchMatchHeaders & {
+        @doc("The ID of the Job Schedule to get.")
+        @path
+        jobScheduleId: string;
+
+        @doc("An OData $select clause.")
+        @query({
+          format: "csv",
+        })
+        $select?: string[];
+
+        @doc("An OData $expand clause.")
+        @query({
+          format: "csv",
+        })
+        $expand?: string[];
+      },
+    BatchResponseHeaders & BatchJobSchedule,
+    {},
+    BatchError
   >;
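`jobScheduleExists` is modeled with `RpcOperationWithAdditionalResponse` so that both the 200 and the 404 are legitimate, non-error outcomes of the HEAD probe. A sketch of the caller's side (names illustrative):

import { getClient } from "@azure-rest/core-client";
import { DefaultAzureCredential } from "@azure/identity";

const client = getClient("https://contoso.eastus.batch.azure.com", new DefaultAzureCredential(), {
  credentials: { scopes: ["https://batch.core.windows.net/.default"] },
});

const probe = await client.path("/jobschedules/{jobScheduleId}", "schedule-1").head();
const exists = probe.status === "200"; // "404" is the modeled "does not exist" response
console.log(exists ? "schedule present" : "schedule absent");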
""") - @example("./examples/JobSchedulePatch.json", "JobSchedule patch") @route("/jobschedules/{jobScheduleId}") @patch - PatchJobSchedule is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job Schedule to update.") - @path - jobScheduleId: string; + updateJobSchedule is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The ID of the Job Schedule to update.") + @path + jobScheduleId: string; - @doc("The parameters for the request.") - @body - jobScheduleUpdate: BatchJobSchedule; - }, + @doc("The options to use for updating the Job Schedule.") + @body + body: BatchJobScheduleUpdateOptions; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; + @statusCode + code: "200"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Updates the properties of the specified Job Schedule.") @@ -1284,120 +1196,140 @@ Batch service will remove the existing schedule. Changes to a Job Schedule only impact Jobs created by the schedule after the update has taken place; currently running Jobs are unaffected. """) - @example("./examples/JobScheduleUpdate.json", "JobSchedule update") @route("/jobschedules/{jobScheduleId}") @put - UpdateJobSchedule is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job Schedule to update.") - @path - jobScheduleId: string; + replaceJobSchedule is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The ID of the Job Schedule to update.") + @path + jobScheduleId: string; - @doc("The parameters for the request.") - @body - jobSchedule: BatchJobSchedule; - }, + @doc("A Job Schedule with updated properties") + @body + body: BatchJobSchedule; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; + @statusCode + code: "200"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Disables a Job Schedule.") @doc("No new Jobs will be created until the Job Schedule is enabled again.") - @example("./examples/JobScheduleDisable.json", "JobSchedule disable") @route("/jobschedules/{jobScheduleId}/disable") @post - DisableJobSchedule is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job Schedule to disable.") - @path - jobScheduleId: string; - }, + disableJobSchedule is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("The ID of the Job Schedule to disable.") + @path + jobScheduleId: string; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "204"; + @statusCode + code: "204"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Enables a Job Schedule.") @doc("Enables a Job Schedule.") - @example("./examples/JobScheduleEnable.json", "JobSchedule enable") @route("/jobschedules/{jobScheduleId}/enable") @post - EnableJobSchedule is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of 
-      @path
-      jobScheduleId: string;
-    },
+  enableJobSchedule is RpcOperation<
+    BatchClientRequestHeaders &
+      BatchMatchHeaders & {
+        @doc("The ID of the Job Schedule to enable.")
+        @path
+        jobScheduleId: string;
+      },
     BatchResponseHeaders & {
       @doc("A process exit code.")
-      @statusCode code: "204";
+      @statusCode
+      code: "204";

       @header("DataServiceId")
       @doc("The OData ID of the resource to which the request applied.")
       DataServiceId: string;
-    }
+    },
+    {},
+    BatchError
   >;

   @summary("Terminates a Job Schedule.")
   @doc("Terminates a Job Schedule.")
-  @example("./examples/JobScheduleTerminate.json", "JobSchedule terminate")
   @route("/jobschedules/{jobScheduleId}/terminate")
   @post
-  TerminateJobSchedule is Azure.Core.Foundations.Operation<
-    BatchClientRequestHeaders & BatchMatchHeaders & {
-      @doc("The ID of the Job Schedule to terminates.")
-      @path
-      jobScheduleId: string;
-    },
+  terminateJobSchedule is RpcOperation<
+    BatchClientRequestHeaders &
+      BatchMatchHeaders & {
+        @doc("The ID of the Job Schedule to terminate.")
+        @path
+        jobScheduleId: string;
+      },
     BatchResponseHeaders & {
       @doc("A process exit code.")
-      @statusCode code: "202";
+      @statusCode
+      code: "202";

       @header("DataServiceId")
       @doc("The OData ID of the resource to which the request applied.")
       DataServiceId: string;
-    }
+    },
+    {},
+    BatchError
   >;

-  @summary("Adds a Job Schedule to the specified Account.")
-  @doc("Adds a Job Schedule to the specified Account.")
-  @example("./examples/JobScheduleAdd_Basic.json", "Add a basic JobSchedule")
-  @example("./examples/JobScheduleAdd_Complex.json", "Add a complex JobScheduleAdd")
+  @summary("Creates a Job Schedule in the specified Account.")
+  @doc("Creates a Job Schedule in the specified Account.")
   @route("/jobschedules")
   @post
-  AddJobSchedule is Azure.Core.Foundations.Operation<
+  createJobSchedule is RpcOperation<
     BatchClientRequestHeaders & {
-      @doc("The Job Schedule to be added.")
+      @doc("Type of content")
+      @header("content-type")
+      contentType: "application/json; odata=minimalmetadata";
+
+      @doc("The Job Schedule to be created.")
       @body
-      jobSchedule: BatchJobSchedule;
+      body: BatchJobScheduleCreateOptions;
     },
     BatchResponseHeaders & {
       @doc("A process exit code.")
-      @statusCode code: "201";
+      @statusCode
+      code: "201";

       @header("DataServiceId")
       @doc("The OData ID of the resource to which the request applied.")
       DataServiceId: string;
-    }
+    },
+    {},
+    BatchError
   >;

   @summary("Lists all of the Job Schedules in the specified Account.")
   @doc("Lists all of the Job Schedules in the specified Account.")
-  @example("./examples/JobScheduleList.json", "JobSchedule list")
   @route("/jobschedules")
   @get
-  ListJobSchedules is Azure.Core.Foundations.Operation<
+  listJobSchedules is RpcOperation<
     BatchApplicationListHeaders & {
       @doc("""
 An OData $filter clause.
 For more information on constructing this filter, see
@@ -1407,50 +1339,56 @@ https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#li
 """)
       @query
       $filter?: string;

       @doc("An OData $select clause.")
-      @query
-      $select?: string;
+      @query({ format: "csv" })
+      $select?: string[];

       @doc("An OData $expand clause.")
-      @query
-      $expand?: string;
+      @query({ format: "csv" })
+      $expand?: string[];
     },
-    BatchResponseHeaders & BatchJobScheduleListResult
+    BatchResponseHeaders & BatchJobScheduleListResult,
+    {},
+    BatchError
   >;
 }

-@operationGroup
-@tag("Tasks")
-interface Task {
-  @summary("Adds a Task to the specified Job.")
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase"
+#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters."
+#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body."
+interface Tasks {
+  @summary("Creates a Task in the specified Job.")
   @doc("""
 The maximum lifetime of a Task from addition to completion is 180 days. If a
 Task has not completed within 180 days of being added it will be terminated by
 the Batch service and left in whatever state it was in at that time.
 """)
-  @example("./examples/TaskAdd_Basic.json", "Add a basic task")
-  @example("./examples/TaskAdd_ExitConditions.json", "Add a task with exit conditions")
-  @example("./examples/TaskAdd_ContainerSettings.json", "Add a task with container settings")
-  @example("./examples/TaskAdd_RequiredSlots.json", "Add a task with extra slot requirement")
   @route("/jobs/{jobId}/tasks")
   @post
-  AddTask is Azure.Core.Foundations.Operation<
+  createTask is RpcOperation<
     BatchClientRequestHeaders & {
-      @doc("The ID of the Job to which the Task is to be added.")
+      @doc("Type of content")
+      @header("content-type")
+      contentType: "application/json; odata=minimalmetadata";
+
+      @doc("The ID of the Job in which the Task is to be created.")
       @path
       jobId: string;

-      @doc("The Task to be added.")
+      @doc("The Task to be created.")
       @body
-      task: BatchTask;
+      body: BatchTaskCreateOptions;
     },
     BatchResponseHeaders & {
       @doc("A process exit code.")
-      @statusCode code: "201";
+      @statusCode
+      code: "201";

       @header("DataServiceId")
       @doc("The OData ID of the resource to which the request applied.")
       DataServiceId: string;
-    }
+    },
+    {},
+    BatchError
   >;

   @summary("Lists all of the Tasks that are associated with the specified Job.")
   @doc("""
 For multi-instance Tasks, information such as affinityId, executionInfo and
 nodeInfo refer to the primary Task. Use the list subtasks API to retrieve
 information about subtasks.
 """)
-  @example("./examples/TaskList.json", "Task list")
   @route("/jobs/{jobId}/tasks")
   @get
-  ListTasks is Azure.Core.Foundations.Operation<
+  listTasks is RpcOperation<
     BatchApplicationListHeaders & {
       @doc("The ID of the Job.")
       @path
       jobId: string;

       @doc("""
 An OData $filter clause.
 For more information on constructing this filter, see
@@ -1476,14 +1413,16 @@ https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#li
       $filter?: string;

       @doc("An OData $select clause.")
-      @query
-      $select?: string;
+      @query({ format: "csv" })
+      $select?: string[];

       @doc("An OData $expand clause.")
-      @query
-      $expand?: string;
+      @query({ format: "csv" })
+      $expand?: string[];
     },
-    BatchResponseHeaders & BatchTaskListResult
+    BatchResponseHeaders & BatchTaskListResult,
+    {},
+    BatchError
   >;

   @summary("Adds a collection of Tasks to the specified Job.")
   @doc("""
 Note that each Task must have a unique ID. The Batch service may not return the
 results for each Task in the same order the Tasks were submitted in this
 request. If the server times out or the connection is closed during the
 request, the request may have been partially or fully processed, or not at all.
 In such cases, the user should re-issue the request. Note that it is up to the
 user to correctly handle failures when re-issuing a request. For example, you
 should use the same Task IDs during a retry so that if the prior operation
 succeeded, the retry will not create extra Tasks unexpectedly. If the response
 contains any Tasks which failed to add, a client can retry the request. In a
 retry, it is most efficient to resubmit only Tasks that failed to add, and to
 omit Tasks that were successfully added on the first attempt. The maximum
@@ -1503,21 +1442,25 @@ lifetime of a Task from addition to completion is 180 days.
 If a Task has not completed within 180 days of being added it will be
 terminated by the Batch service and left in whatever state it was in at that
 time.
 """)
-  @example("./examples/TaskAddCollection_Basic.json", "Add a basic collection of tasks")
-  @example("./examples/TaskAddCollection_Complex.json", "Add a complex collection of tasks")
   @route("/jobs/{jobId}/addtaskcollection")
   @post
-  AddTaskCollection is Azure.Core.Foundations.Operation<
+  createTaskCollection is RpcOperation<
     BatchClientRequestHeaders & {
+      @doc("Type of content")
+      @header("content-type")
+      contentType: "application/json; odata=minimalmetadata";
+
       @doc("The ID of the Job to which the Task collection is to be added.")
       @path
       jobId: string;

       @doc("The Tasks to be added.")
       @body
-      taskCollection: BatchTaskCollection;
+      collection: BatchTaskCollection;
     },
-    BatchClientResponseHeaders & TaskAddCollectionResult
+    BatchClientResponseHeaders & TaskAddCollectionResult,
+    {},
+    BatchError
   >;
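Since `createTaskCollection` accepts at most 100 tasks per request and recommends retrying only the failed additions, clients usually chunk locally. A sketch under those assumptions (the `value` array is assumed from `BatchTaskCollection`; task shapes are illustrative):

import { getClient } from "@azure-rest/core-client";
import { DefaultAzureCredential } from "@azure/identity";

const client = getClient("https://contoso.eastus.batch.azure.com", new DefaultAzureCredential(), {
  credentials: { scopes: ["https://batch.core.windows.net/.default"] },
});

// Stable IDs make retries idempotent, per the doc above.
const tasks = Array.from({ length: 250 }, (_, n) => ({
  id: `task-${n}`,
  commandLine: "/bin/bash -c 'echo hello'",
}));

for (let i = 0; i < tasks.length; i += 100) {
  const res = await client.path("/jobs/{jobId}/addtaskcollection", "job-1").post({
    contentType: "application/json; odata=minimalmetadata",
    body: { value: tasks.slice(i, i + 100) }, // never more than 100 per call
  });
  if (res.status !== "200") throw new Error(`batch ${i / 100} failed: ${res.status}`);
}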
""") - @example("./examples/TaskGet.json", "Task get") @route("/jobs/{jobId}/tasks/{taskId}") @get - GetTaskCollection is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job that contains the Task.") - @path - jobId: string; - - @doc("The ID of the Task to get information about.") - @path - taskId: string; - - @doc("An OData $select clause.") - @query - $select?: string; - - @doc("An OData $expand clause.") - @query - $expand?: string; - }, + getTask is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("The ID of the Job that contains the Task.") + @path + jobId: string; - BatchResponseHeaders & BatchTask & { - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } + @doc("The ID of the Task to get information about.") + @path + taskId: string; + + @doc("An OData $select clause.") + @query({ + format: "csv", + }) + $select?: string[]; + + @doc("An OData $expand clause.") + @query({ + format: "csv", + }) + $expand?: string[]; + }, + BatchResponseHeaders & + BatchTask & { + @header("DataServiceId") + @doc("The OData ID of the resource to which the request applied.") + DataServiceId: string; + }, + {}, + BatchError >; @doc("Updates the properties of the specified Task.") - @example("./examples/TaskUpdate.json", "Task update") @route("/jobs/{jobId}/tasks/{taskId}") @put - UpdateTaskCollection is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & BatchMatchHeaders & { - @doc("The ID of the Job containing the Task.") - @path - jobId: string; + replaceTask is RpcOperation< + BatchClientRequestHeaders & + BatchMatchHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + + @doc("The ID of the Job containing the Task.") + @path + jobId: string; - @doc("The ID of the Task to update.") - @path - taskId: string; + @doc("The ID of the Task to update.") + @path + taskId: string; - @doc("The parameters for the request.") - @body - task: BatchTask; - }, + @doc("The Task to update.") + @body + body: BatchTask; + }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; + @statusCode + code: "200"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; + #suppress "@azure-tools/typespec-azure-core/use-standard-names" "GET operations that return single objects should start with 'get'" @summary(""" Lists all of the subtasks that are associated with the specified multi-instance Task. """) @doc("If the Task is not a multi-instance Task then this returns an empty collection.") - @example("./examples/TaskListSubtasks.json", "Task list subtasks") @route("/jobs/{jobId}/tasks/{taskId}/subtasksinfo") @get - ListSubtasks is Azure.Core.Foundations.Operation< + listSubTasks is RpcOperation< BatchClientRequestHeaders & { @doc("The ID of the Job.") @path @@ -1629,10 +1588,14 @@ Task. taskId: string; @doc("An OData $select clause.") - @query - $select?: string; + @query({ + format: "csv", + }) + $select?: string[]; }, - BatchResponseHeaders & BatchTaskListSubtasksResult + BatchResponseHeaders & BatchTaskListSubtasksResult, + {}, + BatchError >; @summary("Terminates the specified Task.") @@ -1641,24 +1604,26 @@ When the Task has been terminated, it moves to the completed state. 
 For multi-instance Tasks, the terminate Task operation applies synchronously to
 the primary task; subtasks are then terminated asynchronously in the background.
 """)
-  @example("./examples/TaskTerminate.json", "Task terminate")
   @route("/jobs/{jobId}/tasks/{taskId}/terminate")
   @post
-  TerminateTaskCollection is Azure.Core.Foundations.Operation<
-    BatchClientRequestHeaders & BatchMatchHeaders & {
-      @doc("The ID of the Job containing the Task.")
-      @path
-      jobId: string;
+  terminateTask is RpcOperation<
+    BatchClientRequestHeaders &
+      BatchMatchHeaders & {
+        @doc("The ID of the Job containing the Task.")
+        @path
+        jobId: string;

-      @doc("The ID of the Task to terminate.")
-      @path
-      taskId: string;
-    },
+        @doc("The ID of the Task to terminate.")
+        @path
+        taskId: string;
+      },
     BatchResponseHeaders & {
       @header("DataServiceId")
       @doc("The OData ID of the resource to which the request applied.")
       DataServiceId: string;
-    }
+    },
+    {},
+    BatchError
   >;

   @summary("""
 Reactivates a Task, allowing it to run again even if its retry count has been
 exhausted.
 """)
   @doc("""
 Reactivation makes a Task eligible to be retried again up to its maximum retry
 count. The Task's state is changed to active. As the Task is no longer in the
 completed state, any previous exit code or failure information is no longer
 available after reactivation. Each time a Task is reactivated, its retry count
 is reset to 0. Reactivation will fail for Tasks that are not completed or that
 previously completed successfully (with an exit code of 0). Additionally, it
 will fail if the Job has completed (or is terminating or deleting).
 """)
-  @example("./examples/TaskReactivate.json", "Task reactivate")
   @route("/jobs/{jobId}/tasks/{taskId}/reactivate")
   @post
-  ReactivateTaskCollection is Azure.Core.Foundations.Operation<
-    BatchClientRequestHeaders & BatchMatchHeaders & {
-      @doc("The ID of the Job containing the Task.")
+  reactivateTask is RpcOperation<
+    BatchClientRequestHeaders &
+      BatchMatchHeaders & {
+        @doc("The ID of the Job containing the Task.")
+        @path
+        jobId: string;
+
+        @doc("The ID of the Task to reactivate.")
+        @path
+        taskId: string;
+      },
+    BatchResponseHeaders & {
+      @header("DataServiceId")
+      @doc("The OData ID of the resource to which the request applied.")
+      DataServiceId: string;
+    },
+    {},
+    BatchError
+  >;
+
+  @summary("Deletes the specified Task file from the Compute Node where the Task ran.")
+  @doc("Deletes the specified Task file from the Compute Node where the Task ran.")
+  @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}")
+  @delete
+  deleteTaskFile is RpcOperation<
+    BatchTaskFileClientPathParameters & {
+      @doc("""
+Whether to delete children of a directory. If the filePath parameter represents
+a directory instead of a file, you can set recursive to true to delete the
+directory and all of the files and subdirectories in it. If recursive is false
+then the directory must be empty or deletion will fail.
+""")
+      @query
+      recursive?: boolean;
+    },
+    BatchClientResponseHeaders & {
+      @doc("A process exit code.")
+      @statusCode
+      code: "200";
+    },
+    {},
+    BatchError
+  >;
+
+  @doc("Returns the content of the specified Task file.")
+  @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}")
+  @get
+  getTaskFile is RpcOperation<
+    BatchTaskFileClientPathParameters &
+      BatchModifiedSinceHeaders & {
+        @doc("""
+The byte range to be retrieved. The default is to retrieve the entire file. The
+format is bytes=startRange-endRange.
+""") + @header + `ocp-range`?: string; + }, + BatchResponseHeaders & + FileResponse & { + @doc("Type of content") + @header("content-type") + contentType: "application/octet-stream"; + + @body + @doc("A response containing the file content.") + file: bytes; + }, + {}, + BatchError + >; + + @doc("Gets the properties of the specified Task file.") + @route("/jobs/{jobId}/tasks/{taskId}/files/{filePath}") + @head + getTaskFileProperties is RpcOperation< + BatchTaskFileClientPathParameters & BatchModifiedSinceHeaders, + BatchResponseHeaders & FileResponse, + {}, + BatchError + >; + + @summary("Lists the files in a Task's directory on its Compute Node.") + @doc("Lists the files in a Task's directory on its Compute Node.") + @route("/jobs/{jobId}/tasks/{taskId}/files") + @get + listTaskFiles is RpcOperation< + BatchApplicationListHeaders & { + @doc("The ID of the Job that contains the Task.") @path jobId: string; - @doc("The ID of the Task to reactivate.") + @doc("The ID of the Task whose files you want to list.") @path taskId: string; + + @doc(""" +An OData $filter clause. For more information on constructing this filter, see +https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. +""") + @query + $filter?: string; + + @doc(""" +Whether to list children of the Task directory. This parameter can be used in +combination with the filter parameter to list specific type of files. +""") + @query + recursive?: boolean; }, - BatchResponseHeaders & { - @header("DataServiceId") - @doc("The OData ID of the resource to which the request applied.") - DataServiceId: string; - } + BatchResponseHeaders & NodeFileListResult, + {}, + BatchError >; } -@operationGroup -@tag("ComputeNodes") -interface ComputeNodes { +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" +#suppress "@azure-tools/typespec-azure-core/no-rpc-path-params" "Operations defined using RpcOperation should not have path parameters." +#suppress "@azure-tools/typespec-azure-core/rpc-operation-request-body" "RPCOperation with '@get' cannot have a body." +interface Nodes { @summary("Adds a user Account to the specified Compute Node.") @doc(""" You can add a user Account to a Compute Node only when it is in the idle or running state. """) - @example("./examples/NodeAddUser.json", "Node add user") @route("/pools/{poolId}/nodes/{nodeId}/users") @post - AddUser is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + createNodeUser is RpcOperation< + BatchClientRequestHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1716,18 +1781,21 @@ running state. @path nodeId: string; - @doc("The user Account to be created.") + @doc("The options to use for creating the user.") @body - user: ComputeNodeUser; + body: BatchNodeUserCreateOptions; }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "201"; + @statusCode + code: "201"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Deletes a user Account from the specified Compute Node.") @@ -1735,11 +1803,10 @@ running state. You can delete a user Account to a Compute Node only when it is in the idle or running state. 
""") - @example("./examples/NodeDeleteUser.json", "Node delete user") @route("/pools/{poolId}/nodes/{nodeId}/users/{userName}") @delete - DeleteUser is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + deleteNodeUser is RpcOperation< + BatchClientRequestHeaders & { @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1754,13 +1821,15 @@ running state. }, BatchClientResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; - } + @statusCode + code: "200"; + }, + {}, + BatchError >; @summary(""" -Updates the password and expiration time of a user Account on the specified -Compute Node. +Updates the password and expiration time of a user Account on the specified Compute Node. """) @doc(""" This operation replaces of all the updatable properties of the Account. For @@ -1768,11 +1837,14 @@ example, if the expiryTime element is not specified, the current value is replaced with the default value, not left unmodified. You can update a user Account on a Compute Node only when it is in the idle or running state. """) - @example("./examples/NodeUpdateUser.json", "Node update user") @route("/pools/{poolId}/nodes/{nodeId}/users/{userName}") @put - UpdateUser is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + replaceNodeUser is RpcOperation< + BatchClientRequestHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1785,27 +1857,29 @@ Account on a Compute Node only when it is in the idle or running state. @path userName: string; - @doc("The parameters for the request.") + @doc("The options to use for updating the user.") @body - parameters: NodeUpdateUserParameters; + body: BatchNodeUserUpdateOptions; }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; + @statusCode + code: "200"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Gets information about the specified Compute Node.") @doc("Gets information about the specified Compute Node.") - @example("./examples/NodeGet_Basic.json", "Node get") @route("/pools/{poolId}/nodes/{nodeId}") @get - GetComputeNode is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + getNode is RpcOperation< + BatchClientRequestHeaders & { @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1815,19 +1889,26 @@ Account on a Compute Node only when it is in the idle or running state. 
nodeId: string; @doc("An OData $select clause.") - @query - $select?: string; + @query({ + format: "csv", + }) + $select?: string[]; }, - BatchResponseHeaders & ComputeNode + BatchResponseHeaders & BatchNode, + {}, + BatchError >; @summary("Restarts the specified Compute Node.") @doc("You can restart a Compute Node only if it is in an idle or running state.") - @example("./examples/NodeReboot.json", "Node reboot") @route("/pools/{poolId}/nodes/{nodeId}/reboot") @post - RebootComputeNode is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + rebootNode is RpcOperation< + BatchClientRequestHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1836,18 +1917,21 @@ Account on a Compute Node only when it is in the idle or running state. @path nodeId: string; - @doc("The parameters for the request.") + @doc("The options to use for rebooting the Compute Node.") @body - parameters?: NodeRebootParameters; + body?: NodeRebootOptions; }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "202"; + @statusCode + code: "202"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Reinstalls the operating system on the specified Compute Node.") @@ -1856,11 +1940,14 @@ You can reinstall the operating system on a Compute Node only if it is in an idle or running state. This API can be invoked only on Pools created with the cloud service configuration property. """) - @example("./examples/NodeReimage.json", "Node reimage") @route("/pools/{poolId}/nodes/{nodeId}/reimage") @post - ReimageComputeNode is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + reimageNode is RpcOperation< + BatchClientRequestHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1869,18 +1956,21 @@ cloud service configuration property. @path nodeId: string; - @doc("The parameters for the request.") + @doc("The options to use for reimaging the Compute Node.") @body - parameters?: NodeReimageParameters; + body?: NodeReimageOptions; }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "202"; + @statusCode + code: "202"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Disables Task scheduling on the specified Compute Node.") @@ -1888,11 +1978,14 @@ cloud service configuration property. You can disable Task scheduling on a Compute Node only if its current scheduling state is enabled. """) - @example("./examples/NodeDisableScheduling.json", "Node disable scheduling") @route("/pools/{poolId}/nodes/{nodeId}/disablescheduling") @post - DisableScheduling is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + disableNodeScheduling is RpcOperation< + BatchClientRequestHeaders & { + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; + @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1901,18 +1994,21 @@ scheduling state is enabled. 
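
Alongside the reboot hunk above, it may help to see the wire call this spec shape implies. A minimal caller-side sketch using the generic `@azure-rest/core-client` helper; the account endpoint, pool and node IDs, and the `nodeRebootOption` value are illustrative assumptions (the field comes from the Batch REST API, not from this diff), and authentication is omitted:

```ts
import { getClient } from "@azure-rest/core-client";

// Placeholder Batch account endpoint; a real client would also pass credentials.
const batch = getClient("https://<account>.<region>.batch.azure.com");

// POST /pools/{poolId}/nodes/{nodeId}/reboot, modeled above as a 202 response.
await batch
  .pathUnchecked("/pools/{poolId}/nodes/{nodeId}/reboot", "pool-1", "node-1")
  .post({
    // The spec pins this content type on the request.
    contentType: "application/json; odata=minimalmetadata",
    // "taskcompletion" waits for running Tasks to finish before rebooting.
    body: { nodeRebootOption: "taskcompletion" },
  });
```
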
@path nodeId: string; - @doc("The parameters for the request.") + @doc("The options to use for disabling scheduling on the Compute Node.") @body - parameters?: NodeDisableSchedulingParameters; + body?: NodeDisableSchedulingOptions; }, BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; + @statusCode + code: "200"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Enables Task scheduling on the specified Compute Node.") @@ -1920,11 +2016,10 @@ scheduling state is enabled. You can enable Task scheduling on a Compute Node only if its current scheduling state is disabled """) - @example("./examples/NodeEnableScheduling.json", "Node enable scheduling") @route("/pools/{poolId}/nodes/{nodeId}/enablescheduling") @post - EnableScheduling is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + enableNodeScheduling is RpcOperation< + BatchClientRequestHeaders & { @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1933,14 +2028,17 @@ state is disabled @path nodeId: string; }, - BatchResponseHeaders & { + BatchResponseHeaders & { @doc("A process exit code.") - @statusCode code: "200"; + @statusCode + code: "200"; @header("DataServiceId") @doc("The OData ID of the resource to which the request applied.") DataServiceId: string; - } + }, + {}, + BatchError >; @summary("Gets the settings required for remote login to a Compute Node.") @@ -1951,11 +2049,10 @@ invoked only on Pools created with the virtual machine configuration property. For Pools created with a cloud service configuration, see the GetRemoteDesktop API. """) - @example("./examples/NodeGetRemoteLoginSettings.json", "Node get remote login settings") @route("/pools/{poolId}/nodes/{nodeId}/remoteloginsettings") @get - GetRemoteLoginSettings is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + getNodeRemoteLoginSettings is RpcOperation< + BatchClientRequestHeaders & { @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1964,7 +2061,9 @@ API. @path nodeId: string; }, - BatchResponseHeaders & ComputeNodeGetRemoteLoginSettingsResult + BatchResponseHeaders & BatchNodeRemoteLoginSettingsResult, + {}, + BatchError >; @summary("Gets the Remote Desktop Protocol file for the specified Compute Node.") @@ -1974,11 +2073,10 @@ user Account on the Compute Node. This API can only be invoked on Pools created with a cloud service configuration. For Pools created with a virtual machine configuration, see the GetRemoteLoginSettings API. """) - @example("./examples/NodeGetRemoteDesktop.json", "Get RDP file of the compute node") @route("/pools/{poolId}/nodes/{nodeId}/rdp") @get - GetRemoteDesktop is Azure.Core.Foundations.Operation< - BatchClientRequestHeaders & { + getNodeRemoteDesktopFile is RpcOperation< + BatchClientRequestHeaders & { @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; @@ -1991,12 +2089,12 @@ Protocol file. nodeId: string; }, BatchResponseHeaders & { - @header("content-type") contentType: "application/octet-stream"; - @body @doc("A response containing the file content.") file: bytes; - } + }, + {}, + BatchError >; @summary(""" @@ -2009,35 +2107,39 @@ from Compute Nodes if you are experiencing an error and wish to escalate to Azure support. The Azure Batch service log files should be shared with Azure support to aid in debugging issues with the Batch service. 
""") - @example("./examples/NodeUploadBatchServiceLogs.json", "Upload BatchService Logs") @route("/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs") @post - UploadBatchServiceLogs is Azure.Core.Foundations.Operation< + uploadNodeLogs is RpcOperation< BatchClientRequestHeaders & { - @doc("The ID of the Pool that contains the Compute Node.") - @path - poolId: string; + @doc("Type of content") + @header("content-type") + contentType: "application/json; odata=minimalmetadata"; - @doc(""" + @doc("The ID of the Pool that contains the Compute Node.") + @path + poolId: string; + + @doc(""" The ID of the Compute Node for which you want to get the Remote Desktop Protocol file. """) - @path - nodeId: string; + @path + nodeId: string; - @doc("The Azure Batch service log files upload configuration.") - @body - uploadBatchServiceLogsConfiguration: UploadBatchServiceLogsConfiguration; - }, - BatchClientResponseHeaders & UploadBatchServiceLogsResult + @doc("The Azure Batch service log files upload options.") + @body + body: UploadBatchServiceLogsOptions; + }, + BatchClientResponseHeaders & UploadBatchServiceLogsResult, + {}, + BatchError >; @summary("Lists the Compute Nodes in the specified Pool.") @doc("Lists the Compute Nodes in the specified Pool.") - @example("./examples/NodeList.json", "Node list") @route("/pools/{poolId}/nodes") @get - List is Azure.Core.Foundations.Operation< + listNodes is RpcOperation< BatchApplicationListHeaders & { @doc("The ID of the Pool from which you want to list Compute Nodes.") @path @@ -2051,22 +2153,19 @@ https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#li $filter?: string; @doc("An OData $select clause.") - @query - $select?: string; + @query({ format: "csv" }) + $select?: string[]; }, - BatchResponseHeaders & ComputeNodeListResult + BatchResponseHeaders & BatchNodeListResult, + {}, + BatchError >; -} -@operationGroup -@tag("ComputeNodes") -interface ComputeNodeExtensions { @summary("Gets information about the specified Compute Node Extension.") @doc("Gets information about the specified Compute Node Extension.") - @example("./examples/ComputeNodeExtensionGet.json", "Get compute node extension") @route("/pools/{poolId}/nodes/{nodeId}/extensions/{extensionName}") @get - GetComputeNodeExtensions is Azure.Core.Foundations.Operation< + getNodeExtension is RpcOperation< BatchClientRequestHeaders & { @doc("The ID of the Pool that contains the Compute Node.") @path @@ -2084,214 +2183,318 @@ about. 
extensionName: string; @doc("An OData $select clause.") - @query - $select?: string; + @query({ + format: "csv", + }) + $select?: string[]; }, - BatchResponseHeaders & NodeVMExtension + BatchResponseHeaders & NodeVMExtension, + {}, + BatchError >; @summary("Lists the Compute Nodes Extensions in the specified Pool.") @doc("Lists the Compute Nodes Extensions in the specified Pool.") - @example("./examples/ComputeNodeExtensionList.json", "List compute node extensions") @route("/pools/{poolId}/nodes/{nodeId}/extensions") @get - ListComputeNodeExtensions is Azure.Core.Foundations.Operation< - BatchApplicationListHeaders & { - @doc("The ID of the Pool that contains Compute Node.") + listNodeExtensions( + ...BatchApplicationListHeaders, + + @doc("The ID of the Pool that contains Compute Node.") + @path + poolId: string, + + @doc("The ID of the Compute Node that you want to list extensions.") + @path + nodeId: string, + + @doc("An OData $select clause.") + @query({ + format: "csv", + }) + $select?: string[] + ): (BatchResponseHeaders & NodeVMExtensionList) | BatchError; + + @summary("Deletes the specified file from the Compute Node.") + @doc("Deletes the specified file from the Compute Node.") + @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}") + @delete + deleteNodeFile is RpcOperation< + BatchNodeFileClientPathParameters & { + @doc(""" +Whether to delete children of a directory. If the filePath parameter represents +a directory instead of a file, you can set recursive to true to delete the +directory and all of the files and subdirectories in it. If recursive is false +then the directory must be empty or deletion will fail. +""") + @query + recursive?: boolean; + }, + BatchClientResponseHeaders & { + @doc("A process exit code.") + @statusCode + code: "200"; + }, + {}, + BatchError + >; + + @doc("Returns the content of the specified Compute Node file.") + @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}") + @get + getNodeFile is RpcOperation< + BatchNodeFileClientPathParameters & + BatchModifiedSinceHeaders & { + @doc(""" +The byte range to be retrieved. The default is to retrieve the entire file. The +format is bytes=startRange-endRange. +""") + @header + `ocp-range`?: string; + }, + BatchResponseHeaders & + FileResponse & { + @body + @doc("A response containing the file content.") + file: bytes; + }, + {}, + BatchError + >; + + @doc("Gets the properties of the specified Compute Node file.") + @route("/pools/{poolId}/nodes/{nodeId}/files/{filePath}") + @head + getNodeFileProperties is RpcOperation< + BatchNodeFileClientPathParameters & BatchModifiedSinceHeaders, + BatchResponseHeaders & FileResponse, + {}, + BatchError + >; + + @summary("Lists all of the files in Task directories on the specified Compute Node.") + @doc("Lists all of the files in Task directories on the specified Compute Node.") + @route("/pools/{poolId}/nodes/{nodeId}/files") + @get + listNodeFiles is RpcOperation< + BatchApplicationListHeaders & { + @doc("The ID of the Pool that contains the Compute Node.") @path poolId: string; - @doc("The ID of the Compute Node that you want to list extensions.") + @doc("The ID of the Compute Node whose files you want to list.") @path nodeId: string; - @doc("An OData $select clause.") + @doc(""" +An OData $filter clause. For more information on constructing this filter, see +https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. 
+""") + @query + $filter?: string; + + @doc("Whether to list children of a directory.") @query - $select?: string; + recursive?: boolean; }, - BatchResponseHeaders & NodeVMExtensionList + BatchResponseHeaders & NodeFileListResult, + {}, + BatchError >; } - - // headers //////////////////// +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("The request to the Batch service was successful.") model PoolRemoveNodesResponseHeaders { ...BatchResponseHeaders; + @doc("A process exit code.") - @statusCode code: "202"; + @statusCode + code: "202"; @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; + DataServiceId: string; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("The request to the Batch service was successful.") model PoolUpdatePropertiesResponseHeaders { ...BatchResponseHeaders; + @doc("A process exit code.") - @statusCode code: "204"; + @statusCode + code: "204"; @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; + DataServiceId: string; } alias PoolStopResizeResponseHeaders = PoolResizeResponseHeaders; +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("The request to the Batch service was successful.") model PoolResizeResponseHeaders { ...BatchResponseHeaders; + @doc("A process exit code.") - @statusCode code: "202"; + @statusCode + code: "202"; @doc("The OData ID of the resource to which the request applied") @header("DataServiceId") - "DataServiceId": string; + DataServiceId: string; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("Common header values for pool requests") model FileResponse { - @doc("A process exit code.") - @statusCode code: "200"; - - @header - @doc("The file creation time.") - @format("date-time-rfc1123") - "ocp-creation-time"?: string; + @doc("A process exit code.") + @statusCode + code: "200"; - @header - @doc("Whether the object represents a directory.") - "ocp-batch-file-isdirectory": boolean; + @header + @doc("The file creation time.") + @encode(DateTimeKnownEncoding.rfc7231) + `ocp-creation-time`?: utcDateTime; - @header - @doc("The URL of the file.") - "ocp-batch-file-url": string; + @header + @doc("Whether the object represents a directory.") + `ocp-batch-file-isdirectory`: boolean; - @header - @doc("The file mode attribute in octal format.") - "ocp-batch-file-mode": string; + @header + @doc("The URL of the file.") + `ocp-batch-file-url`: string; - @header - @doc("The length of the file.") - "Content-Length": int64; + @header + @doc("The file mode attribute in octal format.") + `ocp-batch-file-mode`: string; - //@header - //@doc("The content type of the file.") - //"content-Type": string; - } + @header + @doc("The length of the file.") + `Content-Length`: int64; +} +///////////////////////////////////// Headers models /////////////////////////////////// @doc("Common header values for pool requests") model BatchPoolHeaders extends BatchMatchHeaders { - - @doc("The ID of the Pool to get.") - @path - poolId: string; - } + @doc("The ID of the Pool to get.") + @path + poolId: string; +} +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("Common header values for pool requests") -model BatchMatchHeaders extends 
BatchModifiedSinceHeaders{ - - @doc(""" +model BatchMatchHeaders extends BatchModifiedSinceHeaders { + @doc(""" An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client. """) - @header - "If-Match"?: string; + @header + @projectedName("python", "if__match") + `If-Match`?: string; - @doc(""" + @doc(""" An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client. """) - @header - "If-None-Match"?: string; - } + @header + @projectedName("python", "if_none_match") + `If-None-Match`?: string; +} +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("Common header values for modified headers") -model BatchModifiedSinceHeaders{ - @doc(""" +model BatchModifiedSinceHeaders { + @doc(""" A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time. """) - @header - @format("date-time-rfc1123") - "If-Modified-Since"?: string; + @header + @encode(DateTimeKnownEncoding.rfc7231) + @projectedName("python", "if_modified_since") + `If-Modified-Since`?: utcDateTime; - @doc(""" + @doc(""" A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time. """) - @header - @format("date-time-rfc1123") - "If-Unmodified-Since"?: string; + @header + @encode(DateTimeKnownEncoding.rfc7231) + @projectedName("python", "if_unmodified_since") + `If-Unmodified-Since`?: utcDateTime; } // This is the standard set of headers that most of batch apis return -alias BatchResponseHeaders = BatchClientResponseHeaders & BatchEtagResponseHeaders; +alias BatchResponseHeaders = BatchClientResponseHeaders & + BatchEtagResponseHeaders; +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("Etag related response header values") model BatchEtagResponseHeaders { @doc("The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers.") @header + @projectedName("python", "etag") ETag?: string; @doc("The time at which the resource was last modified.") @header - @format("date-time-rfc1123") - "Last-Modified"?: string; + @encode(DateTimeKnownEncoding.rfc7231) + `Last-Modified`?: utcDateTime; } +#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase" @doc("Client related response header values") model BatchClientResponseHeaders { @doc("The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true.") @header - @format("uuid") - "client-request-id"?: string; + `client-request-id`?: string; @doc("A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. 
In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in.")
   @header
-  @format("uuid")
-  "request-id"?: string;
+  `request-id`?: string;
 }
 
 @doc("A response containing headers related to the Pool, if it exists.")
 model PoolDoesExistResponseHeaders {
   ...BatchResponseHeaders;
+
   @doc("A process exit code.")
-  @statusCode code: "200";
+  @statusCode
+  code: "200";
 }
 
 @doc("The Pool does not exist.")
 model PoolDoesntExistResponseHeaders {
   @doc("A process exit code.")
-  @statusCode code: "404";
+  @statusCode
+  code: "404";
 }
-
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase"
 @doc("The parameters for a widget status request")
 model DeleteResponseHeaders {
   @doc("A process exit code.")
-  @statusCode code: "202";
+  @statusCode
+  code: "202";
 
   @doc("The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true.")
   @header
-  "client-request-id"?: string;
+  `client-request-id`?: string;
 
   @doc("A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in.")
   @header
-  "request-id"?: string;
+  `request-id`?: string;
 }
-
-
-
 @doc("The parameters for a widget status request")
 model BatchApplicationListHeaders extends BatchClientRequestHeaders {
   @doc("""
@@ -2301,52 +2504,50 @@ applications can be returned.
 """)
   @query
   @minValue(1)
   @maxValue(1000)
-  @extension("x-ms-parameter-grouping",{"postfix": "Options"})
   maxresults?: int32 = 1000;
 
+  #suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase"
   @doc("""
 The time the request was issued. Client libraries typically set this to the
 current system clock time; set it explicitly if you are calling the REST API
 directly.
 """)
   @header
-  @format("date-time-rfc1123")
-  @extension("x-ms-parameter-grouping",{"postfix": "Options"})
-  "ocp-date"?: string;
+  @encode(DateTimeKnownEncoding.rfc7231)
+  `ocp-date`?: utcDateTime;
 }
 
-@doc("Common header parms for Pool related File operartions")
-model BatchPoolFileClientRequestHeaders extends BatchClientRequestHeaders{
-  @doc("The ID of the Pool that contains the Compute Node.")
-  @path
-  poolId: string;
+@doc("Common path params for Node related file operations")
+model BatchNodeFileClientPathParameters extends BatchClientRequestHeaders {
+  @doc("The ID of the Pool that contains the Compute Node.")
+  @path
+  poolId: string;
 
-  @doc("The ID of the Compute Node from which you want to delete the file.")
-  @path
-  nodeId: string;
+  @doc("The ID of the Compute Node from which you want to delete the file.")
+  @path
+  nodeId: string;
 
-  @doc("The path to the file or directory that you want to delete.")
-  @path
-  filePath: string;
+  @doc("The path to the file or directory that you want to delete.")
+  @path
+  filePath: string;
 }
 
-@doc("Common header parms for Job related File operartions")
-model BatchJobFileClientRequestHeaders extends BatchClientRequestHeaders{
-  @doc("The ID of the Job that contains the Task.")
-  @path
-  jobId: string;
+@doc("Common path params for Task related file operations")
+model BatchTaskFileClientPathParameters extends BatchClientRequestHeaders {
+  @doc("The ID of the Job that contains the Task.")
+  @path
+  jobId: string;
 
-  @doc("The ID of the Task whose file you want to retrieve.")
-  @path
-  taskId: string;
+  @doc("The ID of the Task whose file you want to retrieve.")
+  @path
+  taskId: string;
 
-  @doc("The path to the Task file that you want to get the content of.")
-  @path
-  filePath: string;
+  @doc("The path to the Task file that you want to get the content of.")
+  @path
+  filePath: string;
 }
-
-
+#suppress "@azure-tools/typespec-azure-core/casing-style" "The names of Model types must use camelCase"
 @doc("The parameters for a widget status request")
 model BatchClientRequestHeaders {
   @doc("""
@@ -2354,7 +2555,7 @@ The maximum number of items to return in the response. A maximum of 1000
 applications can be returned.
 """)
   @query
-  @extension("x-ms-parameter-grouping",{"postfix": "Options"})
+  @projectedName("client", "timeOutInSeconds")
   timeOut?: int32 = 30;
 
   @doc("""
@@ -2362,14 +2563,11 @@ The caller-generated request identity, in the form of a GUID with no decoration
 such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
 """)
   @header
-  @extension("x-ms-parameter-grouping",{"postfix": "Options"})
-  @format("uuid")
-  "client-request-id"?: string;
+  `client-request-id`?: string;
 
   @doc("Whether the server should return the client-request-id in the response.")
   @header
-  @extension("x-ms-parameter-grouping",{"postfix": "Options"})
-  "return-client-request-id"?: boolean = false;
+  `return-client-request-id`?: boolean = false;
 
   @doc("""
 The time the request was issued. Client libraries typically set this to the
@@ -2377,36 +2575,6 @@ current system clock time; set it explicitly if you are calling the REST API
 directly.
 """)
   @header
-  @extension("x-ms-parameter-grouping",{"postfix": "Options"})
-  @format("date-time-rfc1123")
-  "ocp-date"?: string;
+  @encode(DateTimeKnownEncoding.rfc7231)
+  `ocp-date`?: utcDateTime;
 }
-
-
-
-@doc("The parameters for a widget status request")
-model Pool_ListUsageMetricRequestHeaders {
-
-  @doc("""
-The earliest time from which to include metrics. This must be at least two and
-a half hours before the current time.
If not specified this defaults to the -start time of the last aggregation interval currently available. -""") - @query - starttime?: utcDateTime; - - @doc(""" -The latest time from which to include metrics. This must be at least two hours -before the current time. If not specified this defaults to the end time of the -last aggregation interval currently available. -""") - @query - endtime?: utcDateTime; - - @doc(""" -An OData $filter clause. For more information on constructing this filter, see -https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. -""") - @query - $filter?: string; -} \ No newline at end of file diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/.eslintrc.json b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/.eslintrc.json similarity index 100% rename from packages/typespec-test/test/batch/generated/typespec-ts/.eslintrc.json rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/.eslintrc.json diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/README.md b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/README.md similarity index 82% rename from packages/typespec-test/test/batch/generated/typespec-ts/README.md rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/README.md index 37d9006e11..6687c542fc 100644 --- a/packages/typespec-test/test/batch/generated/typespec-ts/README.md +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/README.md @@ -1,13 +1,13 @@ -# Azure BatchService REST client library for JavaScript +# Azure ChatProtocol REST client library for JavaScript -A client for issuing REST requests to the Azure Batch service. +Azure APIs for the Azure Chat protocol. **Please rely heavily on our [REST client docs](https://github.com/Azure/azure-sdk-for-js/blob/main/documentation/rest-clients.md) to use this library** Key links: -- [Package (NPM)](https://www.npmjs.com/package/@azure-rest/batch) -- [API reference documentation](https://docs.microsoft.com/javascript/api/@azure-rest/batch?view=azure-node-preview) +- [Package (NPM)](https://www.npmjs.com/package/@azure/ai-chat-protocol) +- [API reference documentation](https://docs.microsoft.com/javascript/api/@azure/ai-chat-protocol?view=azure-node-preview) ## Getting started @@ -19,15 +19,15 @@ Key links: - You must have an [Azure subscription](https://azure.microsoft.com/free/) to use this package. 
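
For orientation before the install and authentication steps below, a minimal sketch of constructing the renamed client; the endpoint URL is a placeholder, and `DefaultAzureCredential` assumes `@azure/identity` is installed:

```ts
import { DefaultAzureCredential } from "@azure/identity";
import { ChatProtocolClient } from "@azure/ai-chat-protocol";

const client = new ChatProtocolClient(
  "https://<your-chat-endpoint>",
  new DefaultAzureCredential()
);
```
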
-### Install the `@azure-rest/batch` package
+### Install the `@azure/ai-chat-protocol` package
 
-Install the Azure BatchService REST client REST client library for JavaScript with `npm`:
+Install the Azure ChatProtocol REST client library for JavaScript with `npm`:
 
 ```bash
-npm install @azure-rest/batch
+npm install @azure/ai-chat-protocol
 ```
 
-### Create and authenticate a `BatchServiceClient`
+### Create and authenticate a `ChatProtocolClient`
 
 To use an [Azure Active Directory (AAD) token credential](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/identity/identity/samples/AzureIdentityExamples.md#authenticating-with-a-pre-fetched-access-token),
 provide an instance of the desired credential type obtained from the
diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/api-extractor.json b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/api-extractor.json
similarity index 90%
rename from packages/typespec-test/test/batch/generated/typespec-ts/api-extractor.json
rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/api-extractor.json
index 5513f0e9b8..56a7261c20 100644
--- a/packages/typespec-test/test/batch/generated/typespec-ts/api-extractor.json
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/api-extractor.json
@@ -6,7 +6,7 @@
   "dtsRollup": {
     "enabled": true,
     "untrimmedFilePath": "",
-    "publicTrimmedFilePath": "./types/batch.d.ts"
+    "publicTrimmedFilePath": "./types/ai-chat-protocol.d.ts"
   },
   "messages": {
     "tsdocMessageReporting": { "default": { "logLevel": "none" } },
diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/karma.conf.js b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/karma.conf.js
similarity index 100%
rename from packages/typespec-test/test/batch/generated/typespec-ts/karma.conf.js
rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/karma.conf.js
diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/package.json b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/package.json
similarity index 77%
rename from packages/typespec-test/test/batch/generated/typespec-ts/package.json
rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/package.json
index 3821c13964..5817d4a0f7 100644
--- a/packages/typespec-test/test/batch/generated/typespec-ts/package.json
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/package.json
@@ -1,20 +1,36 @@
 {
-  "name": "@azure-rest/batch",
+  "name": "@azure/ai-chat-protocol",
   "sdk-type": "client",
   "author": "Microsoft Corporation",
   "version": "1.0.0-beta.1",
-  "description": "Batch Service",
+  "description": "undefined",
   "keywords": ["node", "azure", "cloud", "typescript", "browser", "isomorphic"],
   "license": "MIT",
+  "type": "module",
   "main": "dist/index.js",
   "module": "./dist-esm/src/index.js",
-  "types": "./types/batch.d.ts",
+  "types": "./types/ai-chat-protocol.d.ts",
+  "exports": {
+    ".": {
+      "types": "./types/src/index.d.ts",
+      "require": "./dist/index.cjs",
+      "import": "./dist-esm/src/index.js"
+    },
+    "./api": {
+      "types": "./types/src/api/index.d.ts",
+      "import": "./dist-esm/src/api/index.js"
+    },
+    "./models": {
+      "types": "./types/src/models/index.d.ts",
+      "import": "./dist-esm/src/models/index.js"
+    }
+  },
   "repository": "github:Azure/azure-sdk-for-js",
   "bugs": { "url": "https://github.com/Azure/azure-sdk-for-js/issues" },
   "files": [
     "dist/",
     "dist-esm/src/",
-    "types/batch.d.ts",
+    "types/ai-chat-protocol.d.ts",
"README.md", "LICENSE", "review/*" @@ -28,13 +44,13 @@ "build:test": "tsc -p . && rollup -c 2>&1", "build:debug": "echo skipped.", "check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"*.{js,json}\" \"test/**/*.ts\"", - "clean": "rimraf dist dist-browser dist-esm test-dist temp types *.tgz *.log", + "clean": "rimraf --glob dist dist-browser dist-esm test-dist temp types *.tgz *.log", "execute:samples": "echo skipped", "extract-api": "rimraf review && mkdirp ./review && api-extractor run --local", "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"*.{js,json}\" \"test/**/*.ts\"", "generate:client": "echo skipped", "integration-test:browser": "karma start --single-run", - "integration-test:node": "nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 5000000 --full-trace \"dist-esm/test/{,!(browser)/**/}*.spec.js\"", + "integration-test:node": "nyc mocha --require source-map-support/register.js --timeout 5000000 --full-trace \"dist-esm/test/{,!(browser)/**/}*.spec.js\"", "integration-test": "npm run integration-test:node && npm run integration-test:browser", "lint:fix": "eslint package.json api-extractor.json src test --ext .ts --fix --fix-type [problem,suggestion]", "lint": "eslint package.json api-extractor.json src test --ext .ts", @@ -43,7 +59,7 @@ "test:node": "npm run clean && npm run build:test && npm run unit-test:node", "test": "npm run clean && npm run build:test && npm run unit-test", "unit-test": "npm run unit-test:node && npm run unit-test:browser", - "unit-test:node": "mocha -r esm --require ts-node/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"test/{,!(browser)/**/}*.spec.ts\"", + "unit-test:node": "mocha --full-trace \"test/{,!(browser)/**/}*.spec.ts\"", "unit-test:browser": "karma start --single-run", "build": "npm run clean && tsc && rollup -c 2>&1 && npm run minify && mkdirp ./review && npm run extract-api", "minify": "uglifyjs -c -m --comments --source-map \"content='./dist/index.js.map'\" -o ./dist/index.min.js ./dist/index.js" @@ -52,11 +68,11 @@ "autoPublish": false, "dependencies": { "@azure/core-auth": "^1.3.0", - "@azure-rest/core-client": "^1.1.3", - "@azure/core-rest-pipeline": "^1.8.0", + "@azure-rest/core-client": "^1.1.4", + "@azure/core-rest-pipeline": "^1.12.0", "@azure/logger": "^1.0.0", "tslib": "^2.2.0", - "@azure/core-paging": "^1.5.0" + "@azure/core-util": "^1.4.0" }, "devDependencies": { "@microsoft/api-extractor": "^7.31.1", @@ -95,9 +111,15 @@ "karma-source-map-support": "~1.4.0", "karma-sourcemap-loader": "^0.4.0", "karma": "^6.2.0", - "c8": "^8.0.0" + "c8": "^8.0.0", + "ts-node": "^10.0.0" }, "browser": { "./dist-esm/test/public/utils/env.js": "./dist-esm/test/public/utils/env.browser.js" + }, + "mocha": { + "extension": ["ts"], + "timeout": "1200000", + "loader": "ts-node/esm" } } diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/review/ai-chat-protocol.api.md b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/review/ai-chat-protocol.api.md new file mode 100644 index 0000000000..5cd0b0375f --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/review/ai-chat-protocol.api.md @@ -0,0 +1,99 @@ +## API Report File for "@azure/ai-chat-protocol" + +> Do not edit this file. 
It is a report generated by [API Extractor](https://api-extractor.com/).
+
+```ts
+
+import { ClientOptions } from '@azure-rest/core-client';
+import { KeyCredential } from '@azure/core-auth';
+import { OperationOptions } from '@azure-rest/core-client';
+import { Pipeline } from '@azure/core-rest-pipeline';
+import { TokenCredential } from '@azure/core-auth';
+
+// @public
+export interface ChatChoice {
+    context?: Record<string, unknown>;
+    finishReason: FinishReason;
+    index: number;
+    message: ChatMessage;
+    sessionState?: unknown;
+}
+
+// @public
+export interface ChatCompletion {
+    choices: ChatChoice[];
+}
+
+// @public
+export interface ChatCompletionChunk {
+    choices: ChoiceDelta[];
+}
+
+// @public
+export interface ChatCompletionOptions {
+    context?: Record<string, unknown>;
+    messages: ChatMessage[];
+    sessionState?: unknown;
+    stream: false;
+}
+
+// @public
+export interface ChatMessage {
+    content: string;
+    role: ChatRole;
+    sessionState?: unknown;
+}
+
+// @public
+export interface ChatMessageDelta {
+    content?: string;
+    role?: ChatRole;
+    sessionState?: unknown;
+}
+
+// @public (undocumented)
+export class ChatProtocolClient {
+    constructor(endpoint: string, credential: KeyCredential | TokenCredential, options?: ChatProtocolClientOptions);
+    create(body: ChatCompletionOptions, options?: CreateOptions): Promise<ChatCompletion>;
+    createStreaming(body: StreamingChatCompletionOptions, options?: CreateStreamingOptions): Promise<ChatCompletionChunk>;
+    readonly pipeline: Pipeline;
+}
+
+// @public (undocumented)
+export interface ChatProtocolClientOptions extends ClientOptions {
+}
+
+// @public
+export type ChatRole = string;
+
+// @public
+export interface ChoiceDelta {
+    context?: Record<string, unknown>;
+    delta: ChatMessageDelta;
+    finishReason?: FinishReason;
+    index: number;
+    sessionState?: unknown;
+}
+
+// @public (undocumented)
+export interface CreateOptions extends OperationOptions {
+}
+
+// @public (undocumented)
+export interface CreateStreamingOptions extends OperationOptions {
+}
+
+// @public
+export type FinishReason = string;
+
+// @public
+export interface StreamingChatCompletionOptions {
+    context?: Record<string, unknown>;
+    messages: ChatMessage[];
+    sessionState?: unknown;
+    stream: true;
+}
+
+// (No @packageDocumentation comment for this package)
+
+```
diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/rollup.config.js b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/rollup.config.js
similarity index 100%
rename from packages/typespec-test/test/batch/generated/typespec-ts/rollup.config.js
rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/rollup.config.js
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/ChatProtocolClient.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/ChatProtocolClient.ts
new file mode 100644
index 0000000000..62680cb7d8
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/ChatProtocolClient.ts
@@ -0,0 +1,53 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
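
The class added below wraps the `api/` layer with `create` (non-streaming) and `createStreaming` methods. A minimal usage sketch; the endpoint and API key are placeholders, and `AzureKeyCredential` is the `KeyCredential` implementation from `@azure/core-auth`:

```ts
import { AzureKeyCredential } from "@azure/core-auth";
import { ChatProtocolClient } from "@azure/ai-chat-protocol";

const client = new ChatProtocolClient(
  "https://<your-chat-endpoint>",
  new AzureKeyCredential("<api-key>")
);

// Non-streaming completion; `stream: false` selects the ChatCompletion shape.
const completion = await client.create({
  messages: [{ role: "user", content: "What can you do?" }],
  stream: false,
});
for (const choice of completion.choices) {
  console.log(choice.message.content);
}
```
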
+
+import { TokenCredential, KeyCredential } from "@azure/core-auth";
+import { Pipeline } from "@azure/core-rest-pipeline";
+import {
+  StreamingChatCompletionOptions,
+  ChatCompletionChunk,
+  ChatCompletionOptions,
+  ChatCompletion,
+} from "./models/models.js";
+import { CreateStreamingOptions, CreateOptions } from "./models/options.js";
+import {
+  createChatProtocol,
+  ChatProtocolClientOptions,
+  ChatProtocolContext,
+  createStreaming,
+  create,
+} from "./api/index.js";
+
+export { ChatProtocolClientOptions } from "./api/ChatProtocolContext.js";
+
+export class ChatProtocolClient {
+  private _client: ChatProtocolContext;
+  /** The pipeline used by this client to make requests */
+  public readonly pipeline: Pipeline;
+
+  /** Azure APIs for the Azure Chat protocol. */
+  constructor(
+    endpoint: string,
+    credential: KeyCredential | TokenCredential,
+    options: ChatProtocolClientOptions = {}
+  ) {
+    this._client = createChatProtocol(endpoint, credential, options);
+    this.pipeline = this._client.pipeline;
+  }
+
+  /** Creates a new streaming chat completion. */
+  createStreaming(
+    body: StreamingChatCompletionOptions,
+    options: CreateStreamingOptions = { requestOptions: {} }
+  ): Promise<ChatCompletionChunk> {
+    return createStreaming(this._client, body, options);
+  }
+
+  /** Creates a new chat completion. */
+  create(
+    body: ChatCompletionOptions,
+    options: CreateOptions = { requestOptions: {} }
+  ): Promise<ChatCompletion> {
+    return create(this._client, body, options);
+  }
+}
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/ChatProtocolContext.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/ChatProtocolContext.ts
new file mode 100644
index 0000000000..bedabad2a2
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/ChatProtocolContext.ts
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import { TokenCredential, KeyCredential } from "@azure/core-auth";
+import { ClientOptions } from "@azure-rest/core-client";
+import { ChatProtocolContext } from "../rest/index.js";
+import getClient from "../rest/index.js";
+
+export interface ChatProtocolClientOptions extends ClientOptions {}
+
+export { ChatProtocolContext } from "../rest/index.js";
+
+/** Azure APIs for the Azure Chat protocol. */
+export function createChatProtocol(
+  endpoint: string,
+  credential: KeyCredential | TokenCredential,
+  options: ChatProtocolClientOptions = {}
+): ChatProtocolContext {
+  const clientContext = getClient(endpoint, credential, options);
+  return clientContext;
+}
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/index.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/index.ts
new file mode 100644
index 0000000000..8b1f1576f5
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/index.ts
@@ -0,0 +1,9 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
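
The barrel added below is what backs the `./api` subpath declared in the package.json `exports` map earlier in this diff, so the context-based functions can be used without the class wrapper. A sketch with placeholder endpoint and key:

```ts
import { AzureKeyCredential } from "@azure/core-auth";
import { createChatProtocol, create } from "@azure/ai-chat-protocol/api";

const context = createChatProtocol(
  "https://<your-chat-endpoint>",
  new AzureKeyCredential("<api-key>")
);

// Same operation as ChatProtocolClient.create, but called on the raw context.
const completion = await create(context, {
  messages: [{ role: "user", content: "Hi" }],
  stream: false,
});
```
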
+
+export {
+  createChatProtocol,
+  ChatProtocolClientOptions,
+  ChatProtocolContext,
+} from "./ChatProtocolContext.js";
+export { createStreaming, create } from "./operations.js";
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/operations.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/operations.ts
new file mode 100644
index 0000000000..2de11a9681
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/api/operations.ts
@@ -0,0 +1,115 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import {
+  StreamingChatCompletionOptions,
+  ChatCompletionChunk,
+  ChatCompletionOptions,
+  ChatCompletion,
+} from "../models/models.js";
+import {
+  ChatProtocolContext as Client,
+  Create200Response,
+  CreateStreaming200Response,
+} from "../rest/index.js";
+import {
+  StreamableMethod,
+  operationOptionsToRequestParameters,
+} from "@azure-rest/core-client";
+import { CreateStreamingOptions, CreateOptions } from "../models/options.js";
+
+export function _createStreamingSend(
+  context: Client,
+  body: StreamingChatCompletionOptions,
+  options: CreateStreamingOptions = { requestOptions: {} }
+): StreamableMethod<CreateStreaming200Response> {
+  return context
+    .path("/chat")
+    .post({
+      ...operationOptionsToRequestParameters(options),
+      body: {
+        messages: body.messages as any,
+        stream: body["stream"],
+        session_state: body["sessionState"],
+        context: body["context"],
+      },
+    });
+}
+
+export async function _createStreamingDeserialize(
+  result: CreateStreaming200Response
+): Promise<ChatCompletionChunk> {
+  if (result.status !== "200") {
+    throw result.body;
+  }
+
+  return {
+    choices: (result.body["choices"] ?? []).map((p) => ({
+      index: p["index"],
+      delta: {
+        content: p.delta["content"],
+        role: p.delta["role"],
+        sessionState: p.delta["session_state"],
+      },
+      sessionState: p["session_state"],
+      context: p["context"],
+      finishReason: p["finish_reason"],
+    })),
+  };
+}
+
+/** Creates a new streaming chat completion. */
+export async function createStreaming(
+  context: Client,
+  body: StreamingChatCompletionOptions,
+  options: CreateStreamingOptions = { requestOptions: {} }
+): Promise<ChatCompletionChunk> {
+  const result = await _createStreamingSend(context, body, options);
+  return _createStreamingDeserialize(result);
+}
+
+export function _createSend(
+  context: Client,
+  body: ChatCompletionOptions,
+  options: CreateOptions = { requestOptions: {} }
+): StreamableMethod<Create200Response> {
+  return context
+    .path("/chat")
+    .post({
+      ...operationOptionsToRequestParameters(options),
+      body: {
+        messages: body.messages as any,
+        stream: body["stream"],
+        session_state: body["sessionState"],
+        context: body["context"],
+      },
+    });
+}
+
+export async function _createDeserialize(
+  result: Create200Response
+): Promise<ChatCompletion> {
+  if (result.status !== "200") {
+    throw result.body;
+  }
+
+  return {
+    choices: (result.body["choices"] ?? []).map((p) => ({
+      index: p["index"],
+      message: p.message as any,
+      sessionState: p["session_state"],
+      context: p["context"],
+      finishReason: p["finish_reason"],
+    })),
+  };
+}
+
+/** Creates a new chat completion.
*/
+export async function create(
+  context: Client,
+  body: ChatCompletionOptions,
+  options: CreateOptions = { requestOptions: {} }
+): Promise<ChatCompletion> {
+  const result = await _createSend(context, body, options);
+  return _createDeserialize(result);
+}
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/index.ts
new file mode 100644
index 0000000000..919ec8ab39
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/index.ts
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+export {
+  ChatProtocolClient,
+  ChatProtocolClientOptions,
+} from "./ChatProtocolClient.js";
+export {
+  StreamingChatCompletionOptions,
+  ChatMessage,
+  ChatRole,
+  ChatCompletionChunk,
+  ChoiceDelta,
+  ChatMessageDelta,
+  FinishReason,
+  ChatCompletionOptions,
+  ChatCompletion,
+  ChatChoice,
+  CreateStreamingOptions,
+  CreateOptions,
+} from "./models/index.js";
diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/src/logger.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/logger.ts
similarity index 67%
rename from packages/typespec-test/test/batch/generated/typespec-ts/src/logger.ts
rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/logger.ts
index fd60a2285f..82c2280099 100644
--- a/packages/typespec-test/test/batch/generated/typespec-ts/src/logger.ts
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/logger.ts
@@ -2,4 +2,4 @@
 // Licensed under the MIT license.
 
 import { createClientLogger } from "@azure/logger";
-export const logger = createClientLogger("batch");
+export const logger = createClientLogger("ai-chat-protocol");
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/index.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/index.ts
new file mode 100644
index 0000000000..a69c499179
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/index.ts
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+export {
+  StreamingChatCompletionOptions,
+  ChatMessage,
+  ChatRole,
+  ChatCompletionChunk,
+  ChoiceDelta,
+  ChatMessageDelta,
+  FinishReason,
+  ChatCompletionOptions,
+  ChatCompletion,
+  ChatChoice,
+} from "./models.js";
+export { CreateStreamingOptions, CreateOptions } from "./options.js";
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/models.ts
new file mode 100644
index 0000000000..d6ce42536f
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/models.ts
@@ -0,0 +1,136 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/** The configuration for a streaming chat completion request. */
+export interface StreamingChatCompletionOptions {
+  /** The collection of context messages associated with this completion request. */
+  messages: ChatMessage[];
+  /** Indicates whether the completion is a streaming or non-streaming completion. */
+  stream: true;
+  /**
+   * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend
+   * being used.
The client must send back the data in this field unchanged in subsequent requests, until the chat app
+   * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous
+   * conversations or user preferences.
+   */
+  sessionState?: unknown;
+  /**
+   * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or
+   * customer_info. These parameters are specific to the chat app and not understood by the generic clients.
+   */
+  context?: Record<string, unknown>;
+}
+
+/** A single, role-attributed message within a chat completion interaction. */
+export interface ChatMessage {
+  /** The text associated with the message. */
+  content: string;
+  /** The role associated with the message. */
+  role: ChatRole;
+  /**
+   * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend
+   * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app
+   * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous
+   * conversations or user preferences.
+   */
+  sessionState?: unknown;
+}
+
+/** A representation of the intended purpose of a message. */
+/** "user", "system", "assistant" */
+export type ChatRole = string;
+
+/** A single response to a streaming completion request. */
+export interface ChatCompletionChunk {
+  /** The collection of choice deltas received in this chunk. */
+  choices: ChoiceDelta[];
+}
+
+/** The representation of an incremental choice received in a streaming completion. */
+export interface ChoiceDelta {
+  /** The index of the chat choice, relative to the other choices in the same completion. */
+  index: number;
+  /** The partial message received for this choice. */
+  delta: ChatMessageDelta;
+  /**
+   * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend
+   * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app
+   * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous
+   * conversations or user preferences.
+   */
+  sessionState?: unknown;
+  /**
+   * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or
+   * customer_info. These parameters are specific to the chat app and not understood by the generic clients.
+   */
+  context?: Record<string, unknown>;
+  /** The reason this chat completion completed its generation. */
+  finishReason?: FinishReason;
+}
+
+/** The representation of a delta message received in a streaming completion. */
+export interface ChatMessageDelta {
+  /** An incremental part of the text associated with the message. */
+  content?: string;
+  /** The role associated with the message. */
+  role?: ChatRole;
+  /**
+   * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend
+   * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app
+   * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous
+   * conversations or user preferences.
+   */
+  sessionState?: unknown;
+}
+
+/** Representation of the reason why a chat session has finished processing.
*/
+/** "stop", "length" */
+export type FinishReason = string;
+
+/** The configuration for a chat completion request. */
+export interface ChatCompletionOptions {
+  /** The collection of context messages associated with this completion request. */
+  messages: ChatMessage[];
+  /** Indicates whether the completion is a streaming or non-streaming completion. */
+  stream: false;
+  /**
+   * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend
+   * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app
+   * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous
+   * conversations or user preferences.
+   */
+  sessionState?: unknown;
+  /**
+   * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or
+   * customer_info. These parameters are specific to the chat app and not understood by the generic clients.
+   */
+  context?: Record<string, unknown>;
+}
+
+/** Representation of the response to a chat completion request. */
+export interface ChatCompletion {
+  /** The collection of generated completions. */
+  choices: ChatChoice[];
+}
+
+/** The representation of a single generated completion. */
+export interface ChatChoice {
+  /** The index of the chat choice, relative to the other choices in the same completion. */
+  index: number;
+  /** The chat message for a given chat completion. */
+  message: ChatMessage;
+  /**
+   * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend
+   * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app
+   * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous
+   * conversations or user preferences.
+   */
+  sessionState?: unknown;
+  /**
+   * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or
+   * customer_info. These parameters are specific to the chat app and not understood by the generic clients.
+   */
+  context?: Record<string, unknown>;
+  /** The reason this chat completion completed its generation. */
+  finishReason: FinishReason;
+}
diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/options.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/options.ts
new file mode 100644
index 0000000000..155d5f9c49
--- /dev/null
+++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/models/options.ts
@@ -0,0 +1,8 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
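
Because the per-operation option bags added below are plain `OperationOptions`, callers inherit the standard core-client knobs. A sketch of per-call cancellation, assuming the `abortSignal` member that current `@azure-rest/core-client` releases expose on `OperationOptions`:

```ts
import { DefaultAzureCredential } from "@azure/identity";
import { ChatProtocolClient } from "@azure/ai-chat-protocol";

const client = new ChatProtocolClient(
  "https://<your-chat-endpoint>",
  new DefaultAzureCredential()
);

// Give up on the completion after ten seconds.
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), 10_000);
const completion = await client.create(
  { messages: [{ role: "user", content: "Hello" }], stream: false },
  { abortSignal: controller.signal }
);
clearTimeout(timer);
console.log(completion.choices.length);
```
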
+ +import { OperationOptions } from "@azure-rest/core-client"; + +export interface CreateStreamingOptions extends OperationOptions {} + +export interface CreateOptions extends OperationOptions {} diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/batchServiceClient.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/chatProtocolClient.ts similarity index 57% rename from packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/batchServiceClient.ts rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/chatProtocolClient.ts index 8489e15fba..20b1942f2b 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/rest/batchServiceClient.ts +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/chatProtocolClient.ts @@ -2,31 +2,24 @@ // Licensed under the MIT license. import { getClient, ClientOptions } from "@azure-rest/core-client"; -import { logger } from "../logger"; -import { TokenCredential } from "@azure/core-auth"; -import { BatchServiceContext } from "./clientDefinitions.js"; +import { logger } from "../logger.js"; +import { TokenCredential, KeyCredential } from "@azure/core-auth"; +import { ChatProtocolContext } from "./clientDefinitions.js"; /** - * Initialize a new instance of `BatchServiceContext` - * @param endpoint - The parameter endpoint + * Initialize a new instance of `ChatProtocolContext` + * @param endpoint - A sequence of textual characters. * @param credentials - uniquely identify client credential * @param options - the parameter for all optional parameters */ export default function createClient( endpoint: string, - credentials: TokenCredential, + credentials: TokenCredential | KeyCredential, options: ClientOptions = {} -): BatchServiceContext { +): ChatProtocolContext { const baseUrl = options.baseUrl ?? `${endpoint}`; - options.apiVersion = options.apiVersion ?? "2022-10-01.16.0"; - options = { - ...options, - credentials: { - scopes: ["https://batch.core.windows.net//.default"], - }, - }; - - const userAgentInfo = `azsdk-js-batch-rest/1.0.0-beta.1`; + options.apiVersion = options.apiVersion ?? "2023-10-01-preview"; + const userAgentInfo = `azsdk-js-ai-chat-protocol-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? `${options.userAgentOptions.userAgentPrefix} ${userAgentInfo}` @@ -39,13 +32,17 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [`${baseUrl}/.default`], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, }; const client = getClient( baseUrl, credentials, options - ) as BatchServiceContext; + ) as ChatProtocolContext; return client; } diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/clientDefinitions.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/clientDefinitions.ts new file mode 100644 index 0000000000..4df746c293 --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/clientDefinitions.ts @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
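// --- Editorial sketch for the createClient above (not generated code): with
// --- the credential defaults now merged into `options`, both auth modes work
// --- without manual scope plumbing; endpoint and key values here are
// --- placeholders, and the defaults remain overridable via ClientOptions:
import { AzureKeyCredential, TokenCredential } from "@azure/core-auth";
import createClient from "./chatProtocolClient.js";
declare const myTokenCredential: TokenCredential;
const byKey = createClient("https://example.contoso.com", new AzureKeyCredential("example-key")); // sent in the default "api-key" header
const byAad = createClient("https://example.contoso.com", myTokenCredential, {
  credentials: { scopes: ["https://example.contoso.com/.default"] }, // overrides the `${baseUrl}/.default` default
});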
+ +import { CreateStreamingParameters, CreateParameters } from "./parameters.js"; +import { CreateStreaming200Response, Create200Response } from "./responses.js"; +import { Client, StreamableMethod } from "@azure-rest/core-client"; + +export interface CreateStreaming { + /** Creates a new streaming chat completion. */ + post( + options?: CreateStreamingParameters + ): StreamableMethod<CreateStreaming200Response>; + /** Creates a new chat completion. */ + post(options?: CreateParameters): StreamableMethod<Create200Response>; +} + +export interface Routes { + /** Resource for '/chat' has methods for the following verbs: post */ + (path: "/chat"): CreateStreaming; +} + +export type ChatProtocolContext = Client & { + path: Routes; +}; diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/index.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/index.ts new file mode 100644 index 0000000000..5cfebb5789 --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/index.ts @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import ChatProtocolClient from "./chatProtocolClient.js"; + +export * from "./chatProtocolClient.js"; +export * from "./parameters.js"; +export * from "./responses.js"; +export * from "./clientDefinitions.js"; +export * from "./models.js"; +export * from "./outputModels.js"; + +export default ChatProtocolClient; diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/models.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/models.ts new file mode 100644 index 0000000000..56d4bada0e --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/models.ts @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/** The configuration for a streaming chat completion request. */ +export interface StreamingChatCompletionOptions { + /** The collection of context messages associated with this completion request. */ + messages: Array<ChatMessage>; + /** Indicates whether the completion is a streaming or non-streaming completion. */ + stream: true; + /** + * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + * conversations or user preferences. + */ + session_state?: unknown; + /** + * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or + * customer_info. These parameters are specific to the chat app and not understood by the generic clients. + */ + context?: Record<string, unknown>; +}
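// --- Editorial note (illustration only): these RLC interfaces mirror the wire
// --- shape, so the state field keeps its JSON name `session_state`, unlike the
// --- camelCase `sessionState` in the modular models earlier in this diff. The
// --- `stream` literal also doubles as a discriminant the compiler can narrow on:
export const exampleStreamingBody: StreamingChatCompletionOptions = {
  messages: [{ content: "Hello", role: "user" }],
  stream: true, // literal `true` selects the streaming variant
};
function isStreamingRequest(
  body: StreamingChatCompletionOptions | ChatCompletionOptions
): body is StreamingChatCompletionOptions {
  return body.stream; // narrows on the boolean literal
}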
+ +/** A single, role-attributed message within a chat completion interaction. */ +export interface ChatMessage { + /** The text associated with the message. */ + content: string; + /** + * The role associated with the message. + * + * Possible values: user, system, assistant + */ + role: string; + /** + * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + * conversations or user preferences. + */ + session_state?: unknown; +} + +/** The configuration for a chat completion request. */ +export interface ChatCompletionOptions { + /** The collection of context messages associated with this completion request. */ + messages: Array<ChatMessage>; + /** Indicates whether the completion is a streaming or non-streaming completion. */ + stream: false; + /** + * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + * conversations or user preferences. + */ + session_state?: unknown; + /** + * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or + * customer_info. These parameters are specific to the chat app and not understood by the generic clients. + */ + context?: Record<string, unknown>; +} diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/outputModels.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/outputModels.ts new file mode 100644 index 0000000000..86821eecb0 --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/outputModels.ts @@ -0,0 +1,104 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/** A single, role-attributed message within a chat completion interaction. */ +export interface ChatMessageOutput { + /** The text associated with the message. */ + content: string; + /** + * The role associated with the message. + * + * Possible values: user, system, assistant + */ + role: string; + /** + * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + * conversations or user preferences. + */ + session_state?: any; +} + +/** A single response to a streaming completion request. */ +export interface ChatCompletionChunkOutput { + /** The collection of choice deltas received in this chunk. */ + choices: Array<ChoiceDeltaOutput>; +}
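// --- Editorial sketch (not generated code): accumulating streamed text. The
// --- operation emits JSON Lines (see the operations.tsp comment later in this
// --- diff); assuming the caller has already split the response stream into
// --- individual lines:
async function collectText(lines: AsyncIterable<string>): Promise<string> {
  let text = "";
  for await (const line of lines) {
    const chunk = JSON.parse(line) as ChatCompletionChunkOutput;
    for (const choice of chunk.choices) {
      text += choice.delta.content ?? ""; // each delta is an incremental fragment
    }
  }
  return text;
}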
+ +/** The representation of an incremental choice received in a streaming completion. */ +export interface ChoiceDeltaOutput { + /** The index of the chat choice, relative to the other choices in the same completion. */ + index: number; + /** The partial message received for this choice. */ + delta: ChatMessageDeltaOutput; + /** + * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + * conversations or user preferences. + */ + session_state?: any; + /** + * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or + * customer_info. These parameters are specific to the chat app and not understood by the generic clients. + */ + context?: Record<string, any>; + /** + * The reason this chat completion completed its generation. + * + * Possible values: stop, length + */ + finish_reason?: string; +} + +/** The representation of a delta message received in a streaming completion. */ +export interface ChatMessageDeltaOutput { + /** An incremental part of the text associated with the message. */ + content?: string; + /** + * The role associated with the message. + * + * Possible values: user, system, assistant + */ + role?: string; + /** + * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + * conversations or user preferences. + */ + session_state?: any; +} + +/** Representation of the response to a chat completion request. */ +export interface ChatCompletionOutput { + /** The collection of generated completions. */ + choices: Array<ChatChoiceOutput>; +} + +/** The representation of a single generated completion. */ +export interface ChatChoiceOutput { + /** The index of the chat choice, relative to the other choices in the same completion. */ + index: number; + /** The chat message for a given chat completion. */ + message: ChatMessageOutput; + /** + * Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + * being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + * sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + * conversations or user preferences. + */ + session_state?: any; + /** + * Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or + * customer_info. These parameters are specific to the chat app and not understood by the generic clients. + */ + context?: Record<string, any>; + /** + * The reason this chat completion completed its generation. + * + * Possible values: stop, length + */ + finish_reason: string; +} diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/parameters.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/parameters.ts new file mode 100644 index 0000000000..607d2f980c --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/parameters.ts @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
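// --- Editorial note (illustration only; refers to outputModels.ts above):
// --- finish_reason is an extensible string with "stop" and "length" documented,
// --- so compare against known values and tolerate future additions:
export function wasTruncated(choice: ChatChoiceOutput): boolean {
  return choice.finish_reason === "length"; // generation hit the token budget
}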
+ +import { RequestParameters } from "@azure-rest/core-client"; +import { + StreamingChatCompletionOptions, + ChatCompletionOptions, +} from "./models.js"; + +export interface CreateStreamingBodyParam { + body?: StreamingChatCompletionOptions; +} + +export type CreateStreamingParameters = CreateStreamingBodyParam & + RequestParameters; + +export interface CreateBodyParam { + body?: ChatCompletionOptions; +} + +export type CreateParameters = CreateBodyParam & RequestParameters; diff --git a/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/responses.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/responses.ts new file mode 100644 index 0000000000..471c422945 --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/src/rest/responses.ts @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { HttpResponse } from "@azure-rest/core-client"; +import { + ChatCompletionChunkOutput, + ChatCompletionOutput, +} from "./outputModels.js"; + +/** The request has succeeded. */ +export interface CreateStreaming200Response extends HttpResponse { + status: "200"; + body: ChatCompletionChunkOutput; +} + +/** The request has succeeded. */ +export interface Create200Response extends HttpResponse { + status: "200"; + body: ChatCompletionOutput; +} diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/test/public/sampleTest.spec.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/sampleTest.spec.ts similarity index 89% rename from packages/typespec-test/test/batch/generated/typespec-ts/test/public/sampleTest.spec.ts rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/sampleTest.spec.ts index bce68e4286..97b8e8a02b 100644 --- a/packages/typespec-test/test/batch/generated/typespec-ts/test/public/sampleTest.spec.ts +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/sampleTest.spec.ts @@ -3,7 +3,7 @@ import { Recorder } from "@azure-tools/test-recorder"; import { assert } from "chai"; -import { createRecorder } from "./utils/recordedClient"; +import { createRecorder } from "./utils/recordedClient.js"; import { Context } from "mocha"; describe("My test", () => { diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/test/public/utils/env.browser.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/utils/env.browser.ts similarity index 100% rename from packages/typespec-test/test/batch/generated/typespec-ts/test/public/utils/env.browser.ts rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/utils/env.browser.ts diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/test/public/utils/env.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/utils/env.ts similarity index 100% rename from packages/typespec-test/test/batch/generated/typespec-ts/test/public/utils/env.ts rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/utils/env.ts diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/test/public/utils/recordedClient.ts b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/utils/recordedClient.ts similarity index 100% rename from packages/typespec-test/test/batch/generated/typespec-ts/test/public/utils/recordedClient.ts rename to 
packages/typespec-test/test/chatApi_modular/generated/typespec-ts/test/public/utils/recordedClient.ts diff --git a/packages/typespec-test/test/batch/generated/typespec-ts/tsconfig.json b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/tsconfig.json similarity index 76% rename from packages/typespec-test/test/batch/generated/typespec-ts/tsconfig.json rename to packages/typespec-test/test/chatApi_modular/generated/typespec-ts/tsconfig.json index 9ca43fa318..c1c30102d9 100644 --- a/packages/typespec-test/test/batch/generated/typespec-ts/tsconfig.json +++ b/packages/typespec-test/test/chatApi_modular/generated/typespec-ts/tsconfig.json @@ -1,8 +1,8 @@ { "compilerOptions": { "target": "ES2017", - "module": "es6", - "lib": [], + "module": "NodeNext", + "lib": ["esnext", "dom"], "declaration": true, "declarationMap": true, "inlineSources": true, @@ -15,11 +15,13 @@ "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "forceConsistentCasingInFileNames": true, - "moduleResolution": "node", + "moduleResolution": "NodeNext", "allowSyntheticDefaultImports": true, "esModuleInterop": true, "outDir": "./dist-esm", - "declarationDir": "./types" + "declarationDir": "./types", + "rootDir": "." }, + "ts-node": { "esm": true }, "include": ["./src/**/*.ts", "./test/**/*.ts"] } diff --git a/packages/typespec-test/test/chatApi_modular/spec/client.tsp b/packages/typespec-test/test/chatApi_modular/spec/client.tsp new file mode 100644 index 0000000000..10ae5eb19d --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/spec/client.tsp @@ -0,0 +1,7 @@ +import "@azure-tools/typespec-client-generator-core"; + +import "./main.tsp"; + +using Azure.ClientGenerator.Core; + +@@access(Azure.AI.ChatProtocol.Chat.createStreaming, Access.internal, "csharp"); diff --git a/packages/typespec-test/test/chatApi_modular/spec/main.tsp b/packages/typespec-test/test/chatApi_modular/spec/main.tsp new file mode 100644 index 0000000000..45595eb0c1 --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/spec/main.tsp @@ -0,0 +1,37 @@ +import "@typespec/rest"; +import "@typespec/http"; +import "@typespec/versioning"; + +import "./operations.tsp"; + +using TypeSpec.Http; +using TypeSpec.Versioning; + +#suppress "@azure-tools/typespec-azure-core/casing-style" "Using the AI acronym similar to what was done for OpenAI" +@doc("Azure APIs for the Azure Chat protocol.") +@service({ + title: "Azure ML Chat", }) +@useAuth( + ApiKeyAuth<ApiKeyLocation.header, "api-key"> | OAuth2Auth<[ + { + type: OAuth2FlowType.implicit, + authorizationUrl: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize", + scopes: [], + } + ]> +) +@server( + "{endpoint}", + "Azure Chat Protocol APIs", + { + endpoint: string, + } +) +@versioned(APIVersion) +namespace Azure.AI.ChatProtocol; + +#suppress "@azure-tools/typespec-azure-core/documentation-required" "https://github.com/Azure/typespec-azure/issues/3107" +enum APIVersion { + v20231001Preview: "2023-10-01-preview", +}
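// --- Editorial note on the tsconfig change above (illustration only): with
// --- `module` and `moduleResolution` set to NodeNext, relative ESM imports
// --- must carry explicit `.js` extensions, which is why this PR rewrites the
// --- generated imports, e.g.:
import { logger } from "../logger.js"; // resolves logger.ts at build time, logger.js at runtime
// import { logger } from "../logger"; // would fail to resolve under NodeNext
logger.info("NodeNext resolution example");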
diff --git a/packages/typespec-test/test/chatApi_modular/spec/models/blocking.tsp b/packages/typespec-test/test/chatApi_modular/spec/models/blocking.tsp new file mode 100644 index 0000000000..81ec34b77e --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/spec/models/blocking.tsp @@ -0,0 +1,36 @@ +import "./common.tsp"; + +namespace Azure.AI.ChatProtocol; + +@doc("The representation of a single generated completion.") +model ChatChoice<TState = unknown, TContext = Record<unknown>> { + @doc("The index of the chat choice, relative to the other choices in the same completion.") + index: safeint; + + @doc("The chat message for a given chat completion.") + message: ChatMessage<TState>; + + ...StateProperty<TState>; + ...ContextProperty<TContext>; + + @doc("The reason this chat completion completed its generation.") + @projectedName("json", "finish_reason") + finishReason: FinishReason; +} +@doc("Representation of the response to a chat completion request.") +model ChatCompletion<TState = unknown, TContext = Record<unknown>> { + @doc("The collection of generated completions.") + choices: ChatChoice<TState, TContext>[]; +} + +@doc("The configuration for a chat completion request.") +model ChatCompletionOptions<TState = unknown, TContext = Record<unknown>> { + @doc("The collection of context messages associated with this completion request.") + messages: ChatMessage<TState>[]; + + @doc("Indicates whether the completion is a streaming or non-streaming completion.") + stream: false; + + ...StateProperty<TState>; + ...ContextProperty<TContext>; +} diff --git a/packages/typespec-test/test/chatApi_modular/spec/models/common.tsp b/packages/typespec-test/test/chatApi_modular/spec/models/common.tsp new file mode 100644 index 0000000000..7fdf47c944 --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/spec/models/common.tsp @@ -0,0 +1,60 @@ +import "@typespec/http"; + +namespace Azure.AI.ChatProtocol; + +using TypeSpec.Http; + +@doc("Representation of the reason why a chat session has finished processing.") +enum FinishReason { + @doc("Completion ended normally.") + stopped: "stop", + + @doc("The completion exhausted available tokens before generation could complete.") + tokenLimitReached: "length", +} + +@doc("A representation of the intended purpose of a message.") +enum ChatRole { + @doc("The role that provides input to the completion.") + user: "user", + + @doc("The role that instructs or configures the behavior of the assistant.") + system: "system", + + @doc("The role that provides responses to the system-instructed, user-prompted input.") + assistant: "assistant", +} + +@doc("A property that represents backend-specific context or arguments.") +model ContextProperty<TContext = Record<unknown>> { + #suppress "@azure-tools/typespec-azure-core/bad-record-type" "Protocol defines the type as Record" + @doc(""" + Context allows the chat app to receive extra parameters from the client, such as temperature, functions, or + customer_info. These parameters are specific to the chat app and not understood by the generic clients. + """) + context?: TContext; +} + +@doc("A property that represents backend-specific information for the tracking of a session.") +model StateProperty<TState = unknown> { + #suppress "@azure-tools/typespec-azure-core/no-unknown" "The protocol defines these as any (object/scalar) for now" + @doc(""" + Field that allows the chat app to store and retrieve data, the structure of such data is dependent on the backend + being used. The client must send back the data in this field unchanged in subsequent requests, until the chat app + sends a new one. The data in this field can be used to implement stateful services, such as remembering previous + conversations or user preferences. + """) + @projectedName("json", "session_state") + sessionState?: TState; +} + +@doc("A single, role-attributed message within a chat completion interaction.") +model ChatMessage<TState = unknown> { + @doc("The text associated with the message.") + content: string; + + @doc("The role associated with the message.") + role: ChatRole; + + ...StateProperty<TState>; +} diff --git a/packages/typespec-test/test/chatApi_modular/spec/models/streaming.tsp b/packages/typespec-test/test/chatApi_modular/spec/models/streaming.tsp new file mode 100644 index 0000000000..c1ede58187 --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/spec/models/streaming.tsp @@ -0,0 +1,48 @@ +import "./common.tsp"; + +namespace Azure.AI.ChatProtocol; + +@doc("The representation of a delta message received in a streaming completion.") +model ChatMessageDelta<TState = unknown> { + @doc("An incremental part of the text associated with the message.") + content?: string; + + @doc("The role associated with the message.") + role?: ChatRole; + + ...StateProperty<TState>; +} + +@doc("The representation of an incremental choice received in a streaming completion.") +model ChoiceDelta<TState = unknown, TContext = Record<unknown>> { + @doc("The index of the chat choice, relative to the other choices in the same completion.") + index: safeint; + + @doc("The partial message received for this choice.") + delta: ChatMessageDelta<TState>; + + ...StateProperty<TState>; + ...ContextProperty<TContext>; + + @doc("The reason this chat completion completed its generation.") + @projectedName("json", "finish_reason") + finishReason?: FinishReason; +} + +@doc("A single response to a streaming completion request.") +model ChatCompletionChunk<TState = unknown, TContext = Record<unknown>> { + @doc("The collection of choice deltas received in this chunk.") + choices: ChoiceDelta<TState, TContext>[]; +} + +@doc("The configuration for a streaming chat completion request.") +model StreamingChatCompletionOptions<TState = unknown, TContext = Record<unknown>> { + @doc("The collection of context messages associated with this completion request.") + messages: ChatMessage<TState>[]; + + @doc("Indicates whether the completion is a streaming or non-streaming completion.") + stream: true; + + ...StateProperty<TState>; + ...ContextProperty<TContext>; +} diff --git a/packages/typespec-test/test/chatApi_modular/spec/operations.tsp b/packages/typespec-test/test/chatApi_modular/spec/operations.tsp new file mode 100644 index 0000000000..134da5406e --- /dev/null +++ b/packages/typespec-test/test/chatApi_modular/spec/operations.tsp @@ -0,0 +1,31 @@ +import "@typespec/http"; + +import "./models/blocking.tsp"; +import "./models/streaming.tsp"; + +namespace Azure.AI.ChatProtocol; + +using TypeSpec.Http; + +interface Chat<TState = unknown, TContext = Record<unknown>> { + /* This operation returns a stream of objects in jsonl format.
*/ + #suppress "@azure-tools/typespec-azure-core/use-standard-operations" "Defining the operation in an Azure independent fashion" + #suppress "@azure-tools/typespec-azure-core/operation-missing-api-version" "API version is not supported by the back end" + @doc("Creates a new streaming chat completion.") + @sharedRoute + createStreaming( + ...StreamingChatCompletionOptions<TState, TContext>, + ): ChatCompletionChunk<TState, TContext>; + + #suppress "@azure-tools/typespec-azure-core/use-standard-operations" "Defining the operation in an Azure independent fashion" + #suppress "@azure-tools/typespec-azure-core/operation-missing-api-version" "API version is not supported by the back end" + @doc("Creates a new chat completion.") + @sharedRoute + create(...ChatCompletionOptions<TState, TContext>): ChatCompletion< + TState, + TContext + >; +} + +@route("/chat") +interface GenericChatClient extends Chat<unknown, Record<unknown>> {} diff --git a/packages/typespec-test/test/batch/tspconfig.yaml b/packages/typespec-test/test/chatApi_modular/tspconfig.yaml similarity index 51% rename from packages/typespec-test/test/batch/tspconfig.yaml rename to packages/typespec-test/test/chatApi_modular/tspconfig.yaml index d65f608985..4e5a7fa833 100644 --- a/packages/typespec-test/test/batch/tspconfig.yaml +++ b/packages/typespec-test/test/chatApi_modular/tspconfig.yaml @@ -2,12 +2,8 @@ emit: - "@azure-tools/typespec-ts" options: "@azure-tools/typespec-ts": - title: Batch Service - generateMetadata: true - generateTest: true azureSdkForJs: false + isModularLibrary: true "emitter-output-dir": "{project-root}/generated/typespec-ts" packageDetails: - name: "@azure-rest/batch" - description: "Batch Service" - version: "1.0.0-beta.1" + name: "@azure/ai-chat-protocol" diff --git a/packages/typespec-test/test/confidentialLedger/generated/typespec-ts/src/confidentialLedgerClient.ts b/packages/typespec-test/test/confidentialLedger/generated/typespec-ts/src/confidentialLedgerClient.ts index 7e507e01a3..c804c3190f 100644 --- a/packages/typespec-test/test/confidentialLedger/generated/typespec-ts/src/confidentialLedgerClient.ts +++ b/packages/typespec-test/test/confidentialLedger/generated/typespec-ts/src/confidentialLedgerClient.ts @@ -19,15 +19,6 @@ export default function createClient( ): ConfidentialLedgerClient { const baseUrl = options.baseUrl ?? `${ledgerUri}`; options.apiVersion = options.apiVersion ?? "2022-05-13"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://confidential-ledger.azure.com/.default", - ], - }, - }; - const userAgentInfo = `azsdk-js-confidential-ledger-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -41,6 +32,11 @@ loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://confidential-ledger.azure.com/.default", + ], + }, }; const client = getClient( diff --git a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/rest/contentSafetyClient.ts b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/rest/contentSafetyClient.ts index 3e4ef67b12..6e76e8a85c 100644 --- a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/rest/contentSafetyClient.ts +++ b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/rest/contentSafetyClient.ts @@ -20,17 +20,6 @@ export default function createClient( ): ContentSafetyContext { const baseUrl = options.baseUrl ??
`${endpoint}/contentsafety`; options.apiVersion = options.apiVersion ?? "2023-10-01"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://cognitiveservices.azure.com/.default", - ], - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", - }, - }; - const userAgentInfo = `azsdk-js-ai-content-safety-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -44,6 +33,13 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://cognitiveservices.azure.com/.default", + ], + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", + }, }; const client = getClient( diff --git a/packages/typespec-test/test/contoso/generated/typespec-ts/src/widgetManagerClient.ts b/packages/typespec-test/test/contoso/generated/typespec-ts/src/widgetManagerClient.ts index 0a56fb2e0c..300c1c65f2 100644 --- a/packages/typespec-test/test/contoso/generated/typespec-ts/src/widgetManagerClient.ts +++ b/packages/typespec-test/test/contoso/generated/typespec-ts/src/widgetManagerClient.ts @@ -16,7 +16,6 @@ export default function createClient( ): WidgetManagerClient { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? "2022-11-01-preview"; - const userAgentInfo = `azsdk-js-contosowidgetmanager-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-test/test/customWrapper/generated/typespec-ts/src/authoringClient.ts b/packages/typespec-test/test/customWrapper/generated/typespec-ts/src/authoringClient.ts index 5d7ae1ef0a..034a111cdb 100644 --- a/packages/typespec-test/test/customWrapper/generated/typespec-ts/src/authoringClient.ts +++ b/packages/typespec-test/test/customWrapper/generated/typespec-ts/src/authoringClient.ts @@ -19,14 +19,6 @@ export default function createClient( ): AuthoringClient { const baseUrl = options.baseUrl ?? `${endpoint}/language`; options.apiVersion = options.apiVersion ?? "2022-05-15-preview"; - options = { - ...options, - credentials: { - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", - }, - }; - const userAgentInfo = `azsdk-js-customWrapper-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -40,6 +32,10 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", + }, }; const client = getClient(baseUrl, credentials, options) as AuthoringClient; diff --git a/packages/typespec-test/test/eventgrid_modular/generated/typespec-ts/src/rest/eventGridClient.ts b/packages/typespec-test/test/eventgrid_modular/generated/typespec-ts/src/rest/eventGridClient.ts index 2c21d362f6..dcfbfd4008 100644 --- a/packages/typespec-test/test/eventgrid_modular/generated/typespec-ts/src/rest/eventGridClient.ts +++ b/packages/typespec-test/test/eventgrid_modular/generated/typespec-ts/src/rest/eventGridClient.ts @@ -19,14 +19,6 @@ export default function createClient( ): EventGridContext { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? 
"2023-06-01-preview"; - options = { - ...options, - credentials: { - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "SharedAccessKey", - }, - }; - const userAgentInfo = `azsdk-js-eventgrid-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -40,6 +32,10 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? "SharedAccessKey", + }, }; const client = getClient(baseUrl, credentials, options) as EventGridContext; diff --git a/packages/typespec-test/test/healthinsight/generated/typespec-ts/src/healthInsightsClinicalMatching.ts b/packages/typespec-test/test/healthinsight/generated/typespec-ts/src/healthInsightsClinicalMatching.ts index a58c5a3d18..850c0bd890 100644 --- a/packages/typespec-test/test/healthinsight/generated/typespec-ts/src/healthInsightsClinicalMatching.ts +++ b/packages/typespec-test/test/healthinsight/generated/typespec-ts/src/healthInsightsClinicalMatching.ts @@ -19,14 +19,6 @@ export default function createClient( ): HealthInsightsClinicalMatchingClient { const baseUrl = options.baseUrl ?? `${endpoint}/healthinsights`; options.apiVersion = options.apiVersion ?? "2023-03-01-preview"; - options = { - ...options, - credentials: { - apiKeyHeaderName: - options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", - }, - }; - const userAgentInfo = `azsdk-js-health-insights-clinicalmatching-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -40,6 +32,10 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + apiKeyHeaderName: + options.credentials?.apiKeyHeaderName ?? "Ocp-Apim-Subscription-Key", + }, }; const client = getClient( diff --git a/packages/typespec-test/test/loadTest/generated/typespec-ts/src/azureLoadTesting.ts b/packages/typespec-test/test/loadTest/generated/typespec-ts/src/azureLoadTesting.ts index d771bfb283..b4a5d6a1e1 100644 --- a/packages/typespec-test/test/loadTest/generated/typespec-ts/src/azureLoadTesting.ts +++ b/packages/typespec-test/test/loadTest/generated/typespec-ts/src/azureLoadTesting.ts @@ -19,15 +19,6 @@ export default function createClient( ): AzureLoadTestingClient { const baseUrl = options.baseUrl ?? `https://${endpoint}`; options.apiVersion = options.apiVersion ?? "2022-11-01"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://cnt-prod.loadtesting.azure.com/.default", - ], - }, - }; - const userAgentInfo = `azsdk-js-load-testing-rest/1.0.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -41,6 +32,11 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? 
[ + "https://cnt-prod.loadtesting.azure.com/.default", + ], + }, }; const client = getClient( diff --git a/packages/typespec-test/test/loadtesting_modular/generated/typespec-ts/src/rest/azureLoadTestingClient.ts b/packages/typespec-test/test/loadtesting_modular/generated/typespec-ts/src/rest/azureLoadTestingClient.ts index 6078c41784..fe4d3294a6 100644 --- a/packages/typespec-test/test/loadtesting_modular/generated/typespec-ts/src/rest/azureLoadTestingClient.ts +++ b/packages/typespec-test/test/loadtesting_modular/generated/typespec-ts/src/rest/azureLoadTestingClient.ts @@ -19,15 +19,6 @@ export default function createClient( ): AzureLoadTestingContext { const baseUrl = options.baseUrl ?? `https://${endpoint}`; options.apiVersion = options.apiVersion ?? "2022-11-01"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://cnt-prod.loadtesting.azure.com/.default", - ], - }, - }; - const userAgentInfo = `azsdk-js-load-testing-rest/1.0.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -41,6 +32,11 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://cnt-prod.loadtesting.azure.com/.default", + ], + }, }; const client = getClient( diff --git a/packages/typespec-test/test/openai/generated/typespec-ts/src/openAIClient.ts b/packages/typespec-test/test/openai/generated/typespec-ts/src/openAIClient.ts index 3c472cdb86..27f42d1662 100644 --- a/packages/typespec-test/test/openai/generated/typespec-ts/src/openAIClient.ts +++ b/packages/typespec-test/test/openai/generated/typespec-ts/src/openAIClient.ts @@ -20,16 +20,6 @@ export default function createClient( ): OpenAIClient { const baseUrl = options.baseUrl ?? `${endpoint}/openai`; options.apiVersion = options.apiVersion ?? "2022-12-01"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://cognitiveservices.azure.com/.default", - ], - apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", - }, - }; - const userAgentInfo = `azsdk-js-openai-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -43,6 +33,12 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://cognitiveservices.azure.com/.default", + ], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, }; const client = getClient(baseUrl, credentials, options) as OpenAIClient; diff --git a/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/rest/openAIClient.ts b/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/rest/openAIClient.ts index 2c7f219735..f03f6364bc 100644 --- a/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/rest/openAIClient.ts +++ b/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/rest/openAIClient.ts @@ -20,16 +20,6 @@ export default function createClient( ): OpenAIContext { const baseUrl = options.baseUrl ?? `${endpoint}/openai`; options.apiVersion = options.apiVersion ?? "2023-08-01-preview"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://cognitiveservices.azure.com/.default", - ], - apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? 
"api-key", - }, - }; - const userAgentInfo = `azsdk-js-openai_modular-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -43,6 +33,12 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://cognitiveservices.azure.com/.default", + ], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, }; const client = getClient(baseUrl, credentials, options) as OpenAIContext; diff --git a/packages/typespec-test/test/parametrizedHost/generated/typespec-ts/src/parametrizedHostClient.ts b/packages/typespec-test/test/parametrizedHost/generated/typespec-ts/src/parametrizedHostClient.ts index 37b92fbba2..2151393217 100644 --- a/packages/typespec-test/test/parametrizedHost/generated/typespec-ts/src/parametrizedHostClient.ts +++ b/packages/typespec-test/test/parametrizedHost/generated/typespec-ts/src/parametrizedHostClient.ts @@ -29,15 +29,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${host}.${subdomain}.${sufix}.com/${apiVersion}`; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://parametrized-host.azure.com/.default", - ], - }, - }; - const userAgentInfo = `azsdk-js-parametrized-host-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -51,6 +42,11 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://parametrized-host.azure.com/.default", + ], + }, }; const client = getClient( diff --git a/packages/typespec-test/test/translator/generated/typespec-ts/src/translatorClient.ts b/packages/typespec-test/test/translator/generated/typespec-ts/src/translatorClient.ts index 3e0bf1c5d0..8c78e61a59 100644 --- a/packages/typespec-test/test/translator/generated/typespec-ts/src/translatorClient.ts +++ b/packages/typespec-test/test/translator/generated/typespec-ts/src/translatorClient.ts @@ -17,7 +17,6 @@ export default function createClient( ): TranslatorClient { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? 
"3.0"; - const userAgentInfo = `azsdk-js-cognitiveservices-translator-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/review/widget_dpg.api.md b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/review/widget_dpg.api.md index 0e371d8c6f..edbbf5614a 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/review/widget_dpg.api.md +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/review/widget_dpg.api.md @@ -38,6 +38,14 @@ export interface GetWidgetOptions extends OperationOptions { // @public (undocumented) export interface ListWidgetsOptions extends OperationOptions { + // (undocumented) + nullableDateHeader?: Date | null; + // (undocumented) + nullableOptionalHeader?: string | null; + // (undocumented) + optionalDateHeader?: Date; + // (undocumented) + optionalHeader?: string; } // @public (undocumented) @@ -64,7 +72,7 @@ export class WidgetServiceClient { createWidget(body: CreateWidget, options?: CreateWidgetOptions): Promise; deleteWidget(id: string, options?: DeleteWidgetOptions): Promise; getWidget(id: string, options?: GetWidgetOptions): Promise; - listWidgets(options?: ListWidgetsOptions): Promise; + listWidgets(requiredHeader: string, bytesHeader: Uint8Array, value: Uint8Array, csvArrayHeader: Uint8Array[], utcDateHeader: Date, options?: ListWidgetsOptions): Promise; readonly pipeline: Pipeline; updateWidget(id: string, body: UpdateWidget, options?: UpdateWidgetOptions): Promise; } diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/WidgetServiceClient.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/WidgetServiceClient.ts index b1cf0abab1..601eeef054 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/WidgetServiceClient.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/WidgetServiceClient.ts @@ -46,9 +46,22 @@ export class WidgetServiceClient { * It does not accept any options or parameters. */ listWidgets( + requiredHeader: string, + bytesHeader: Uint8Array, + value: Uint8Array, + csvArrayHeader: Uint8Array[], + utcDateHeader: Date, options: ListWidgetsOptions = { requestOptions: {} } ): Promise { - return listWidgets(this._client, options); + return listWidgets( + this._client, + requiredHeader, + bytesHeader, + value, + csvArrayHeader, + utcDateHeader, + options + ); } /** Get a widget by ID. 
*/ diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/api/operations.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/api/operations.ts index f19058a4af..98895ed1e5 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/api/operations.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/api/operations.ts @@ -10,6 +10,7 @@ import { import { AnalyzeWidget200Response, AnalyzeWidgetDefaultResponse, + buildCsvCollection, CreateWidget201Response, CreateWidgetDefaultResponse, DeleteWidget204Response, @@ -27,6 +28,7 @@ import { StreamableMethod, operationOptionsToRequestParameters, } from "@azure-rest/core-client"; +import { uint8ArrayToString } from "@azure/core-util"; import { ListWidgetsOptions, GetWidgetOptions, @@ -38,11 +40,47 @@ import { export function _listWidgetsSend( context: Client, + requiredHeader: string, + bytesHeader: Uint8Array, + value: Uint8Array, + csvArrayHeader: Uint8Array[], + utcDateHeader: Date, options: ListWidgetsOptions = { requestOptions: {} } ): StreamableMethod { return context .path("/widgets") - .get({ ...operationOptionsToRequestParameters(options) }); + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + "required-header": requiredHeader, + ...(options?.optionalHeader !== undefined + ? { "optional-header": options?.optionalHeader } + : {}), + ...(options?.nullableOptionalHeader !== undefined && + options?.nullableOptionalHeader !== null + ? { "nullable-optional-header": options?.nullableOptionalHeader } + : {}), + "bytes-header": uint8ArrayToString(bytesHeader, "base64"), + value: uint8ArrayToString(value, "base64"), + "csv-array-header": buildCsvCollection( + (csvArrayHeader ?? []).map((p) => uint8ArrayToString(p, "base64url")) + ), + "utc-date-header": utcDateHeader.toUTCString(), + ...(options?.optionalDateHeader !== undefined + ? { + "optional-date-header": + options?.optionalDateHeader?.toUTCString(), + } + : {}), + ...(options?.nullableDateHeader !== undefined && + options?.nullableDateHeader !== null + ? 
{ + "nullable-date-header": + options?.nullableDateHeader?.toUTCString(), + } + : {}), + }, + }); } export async function _listWidgetsDeserialize( @@ -66,9 +104,22 @@ export async function _listWidgetsDeserialize( */ export async function listWidgets( context: Client, + requiredHeader: string, + bytesHeader: Uint8Array, + value: Uint8Array, + csvArrayHeader: Uint8Array[], + utcDateHeader: Date, options: ListWidgetsOptions = { requestOptions: {} } ): Promise { - const result = await _listWidgetsSend(context, options); + const result = await _listWidgetsSend( + context, + requiredHeader, + bytesHeader, + value, + csvArrayHeader, + utcDateHeader, + options + ); return _listWidgetsDeserialize(result); } diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/models/options.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/models/options.ts index d4dbf5bcd0..a692005100 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/models/options.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/models/options.ts @@ -3,7 +3,12 @@ import { OperationOptions } from "@azure-rest/core-client"; -export interface ListWidgetsOptions extends OperationOptions {} +export interface ListWidgetsOptions extends OperationOptions { + optionalHeader?: string; + nullableOptionalHeader?: string | null; + optionalDateHeader?: Date; + nullableDateHeader?: Date | null; +} export interface GetWidgetOptions extends OperationOptions {} diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/clientDefinitions.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/clientDefinitions.ts index c1f141ab20..4ae0b111ed 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/clientDefinitions.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/clientDefinitions.ts @@ -32,7 +32,7 @@ export interface ListWidgets { * It does not accept any options or parameters. */ get( - options?: ListWidgetsParameters + options: ListWidgetsParameters ): StreamableMethod; /** * Create a new widget. diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/index.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/index.ts index 954ee32351..ae9a8b1539 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/index.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/index.ts @@ -10,5 +10,6 @@ export * from "./clientDefinitions.js"; export * from "./isUnexpected.js"; export * from "./models.js"; export * from "./outputModels.js"; +export * from "./serializeHelper.js"; export default WidgetServiceClient; diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/parameters.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/parameters.ts index 6caca5735d..7db3c5c9a8 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/parameters.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/parameters.ts @@ -1,10 +1,28 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
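// --- Editorial sketch for the header serialization above (not generated code):
// --- `csv-array-header` is produced by base64url-encoding each byte array and
// --- comma-joining the tokens with buildCsvCollection from serializeHelper.ts:
import { uint8ArrayToString } from "@azure/core-util";
import { buildCsvCollection } from "./serializeHelper.js";
const parts = [new Uint8Array([1, 2]), new Uint8Array([3])];
export const exampleCsvArrayHeader = buildCsvCollection(
  parts.map((p) => uint8ArrayToString(p, "base64url"))
); // -> "AQI,Aw"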
+import { RawHttpHeadersInput } from "@azure/core-rest-pipeline"; import { RequestParameters } from "@azure-rest/core-client"; import { CreateWidget, UpdateWidget } from "./models.js"; -export type ListWidgetsParameters = RequestParameters; +export interface ListWidgetsHeaders { + "required-header": string; + "optional-header"?: string; + "nullable-optional-header"?: string | null; + "bytes-header": string; + value: string; + /** This parameter needs to be formatted as csv collection, we provide buildCsvCollection from serializeHelper.ts to help */ + "csv-array-header": string; + "utc-date-header": string; + "optional-date-header"?: string; + "nullable-date-header"?: string | null; +} + +export interface ListWidgetsHeaderParam { + headers: RawHttpHeadersInput & ListWidgetsHeaders; +} + +export type ListWidgetsParameters = ListWidgetsHeaderParam & RequestParameters; export type GetWidgetParameters = RequestParameters; export interface CreateWidgetBodyParam { diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/serializeHelper.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/serializeHelper.ts new file mode 100644 index 0000000000..aeb95223cc --- /dev/null +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/serializeHelper.ts @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export function buildCsvCollection(items: string[] | number[]) { + return items.join(","); +} diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/widgetServiceClient.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/widgetServiceClient.ts index 8c97aa3b99..e96b2dc701 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/widgetServiceClient.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/sources/generated/src/rest/widgetServiceClient.ts @@ -16,7 +16,6 @@ export default function createClient( ): WidgetServiceContext { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? "1.0.0"; - const userAgentInfo = `azsdk-js-widget_dpg-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/WidgetServiceClient.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/WidgetServiceClient.ts index b1cf0abab1..601eeef054 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/WidgetServiceClient.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/WidgetServiceClient.ts @@ -46,9 +46,22 @@ export class WidgetServiceClient { * It does not accept any options or parameters. */ listWidgets( + requiredHeader: string, + bytesHeader: Uint8Array, + value: Uint8Array, + csvArrayHeader: Uint8Array[], + utcDateHeader: Date, options: ListWidgetsOptions = { requestOptions: {} } ): Promise { - return listWidgets(this._client, options); + return listWidgets( + this._client, + requiredHeader, + bytesHeader, + value, + csvArrayHeader, + utcDateHeader, + options + ); } /** Get a widget by ID. 
*/ diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/api/operations.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/api/operations.ts index f19058a4af..98895ed1e5 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/api/operations.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/api/operations.ts @@ -10,6 +10,7 @@ import { import { AnalyzeWidget200Response, AnalyzeWidgetDefaultResponse, + buildCsvCollection, CreateWidget201Response, CreateWidgetDefaultResponse, DeleteWidget204Response, @@ -27,6 +28,7 @@ import { StreamableMethod, operationOptionsToRequestParameters, } from "@azure-rest/core-client"; +import { uint8ArrayToString } from "@azure/core-util"; import { ListWidgetsOptions, GetWidgetOptions, @@ -38,11 +40,47 @@ import { export function _listWidgetsSend( context: Client, + requiredHeader: string, + bytesHeader: Uint8Array, + value: Uint8Array, + csvArrayHeader: Uint8Array[], + utcDateHeader: Date, options: ListWidgetsOptions = { requestOptions: {} } ): StreamableMethod { return context .path("/widgets") - .get({ ...operationOptionsToRequestParameters(options) }); + .get({ + ...operationOptionsToRequestParameters(options), + headers: { + "required-header": requiredHeader, + ...(options?.optionalHeader !== undefined + ? { "optional-header": options?.optionalHeader } + : {}), + ...(options?.nullableOptionalHeader !== undefined && + options?.nullableOptionalHeader !== null + ? { "nullable-optional-header": options?.nullableOptionalHeader } + : {}), + "bytes-header": uint8ArrayToString(bytesHeader, "base64"), + value: uint8ArrayToString(value, "base64"), + "csv-array-header": buildCsvCollection( + (csvArrayHeader ?? []).map((p) => uint8ArrayToString(p, "base64url")) + ), + "utc-date-header": utcDateHeader.toUTCString(), + ...(options?.optionalDateHeader !== undefined + ? { + "optional-date-header": + options?.optionalDateHeader?.toUTCString(), + } + : {}), + ...(options?.nullableDateHeader !== undefined && + options?.nullableDateHeader !== null + ? 
{ + "nullable-date-header": + options?.nullableDateHeader?.toUTCString(), + } + : {}), + }, + }); } export async function _listWidgetsDeserialize( @@ -66,9 +104,22 @@ export async function _listWidgetsDeserialize( */ export async function listWidgets( context: Client, + requiredHeader: string, + bytesHeader: Uint8Array, + value: Uint8Array, + csvArrayHeader: Uint8Array[], + utcDateHeader: Date, options: ListWidgetsOptions = { requestOptions: {} } ): Promise { - const result = await _listWidgetsSend(context, options); + const result = await _listWidgetsSend( + context, + requiredHeader, + bytesHeader, + value, + csvArrayHeader, + utcDateHeader, + options + ); return _listWidgetsDeserialize(result); } diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/models/options.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/models/options.ts index d4dbf5bcd0..a692005100 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/models/options.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/models/options.ts @@ -3,7 +3,12 @@ import { OperationOptions } from "@azure-rest/core-client"; -export interface ListWidgetsOptions extends OperationOptions {} +export interface ListWidgetsOptions extends OperationOptions { + optionalHeader?: string; + nullableOptionalHeader?: string | null; + optionalDateHeader?: Date; + nullableDateHeader?: Date | null; +} export interface GetWidgetOptions extends OperationOptions {} diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/clientDefinitions.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/clientDefinitions.ts index c1f141ab20..4ae0b111ed 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/clientDefinitions.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/clientDefinitions.ts @@ -32,7 +32,7 @@ export interface ListWidgets { * It does not accept any options or parameters. */ get( - options?: ListWidgetsParameters + options: ListWidgetsParameters ): StreamableMethod; /** * Create a new widget. diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/index.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/index.ts index 954ee32351..ae9a8b1539 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/index.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/index.ts @@ -10,5 +10,6 @@ export * from "./clientDefinitions.js"; export * from "./isUnexpected.js"; export * from "./models.js"; export * from "./outputModels.js"; +export * from "./serializeHelper.js"; export default WidgetServiceClient; diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/parameters.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/parameters.ts index 6caca5735d..7db3c5c9a8 100644 --- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/parameters.ts +++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/parameters.ts @@ -1,10 +1,28 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
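// --- Editorial sketch (not generated code): a minimal RLC call satisfying the
// --- now-required ListWidgetsParameters header shape below; all values are
// --- placeholders:
import createWidgetServiceClient from "./widgetServiceClient.js";
export async function listWidgetsOnce() {
  const client = createWidgetServiceClient("http://localhost:3000");
  return client.path("/widgets").get({
    headers: {
      "required-header": "example",
      "bytes-header": "aGVsbG8=", // base64 of "hello"
      value: "aGVsbG8=",
      "csv-array-header": "AQI,Aw", // see buildCsvCollection above
      "utc-date-header": new Date().toUTCString(),
    },
  });
}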
diff --git a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/widgetServiceClient.ts b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/widgetServiceClient.ts
index 8c97aa3b99..e96b2dc701 100644
--- a/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/widgetServiceClient.ts
+++ b/packages/typespec-test/test/widget_dpg/generated/typespec-ts/src/rest/widgetServiceClient.ts
@@ -16,7 +16,6 @@ export default function createClient(
 ): WidgetServiceContext {
   const baseUrl = options.baseUrl ?? `${endpoint}`;
   options.apiVersion = options.apiVersion ?? "1.0.0";
-
   const userAgentInfo = `azsdk-js-widget_dpg-rest/1.0.0-beta.1`;
   const userAgentPrefix =
     options.userAgentOptions && options.userAgentOptions.userAgentPrefix
diff --git a/packages/typespec-test/test/widget_dpg/spec/main.tsp b/packages/typespec-test/test/widget_dpg/spec/main.tsp
index 54ac169553..118d2ca661 100644
--- a/packages/typespec-test/test/widget_dpg/spec/main.tsp
+++ b/packages/typespec-test/test/widget_dpg/spec/main.tsp
@@ -37,6 +37,9 @@ model AnalyzeResult {
   summary: string;
 }
 
+@encode(BytesKnownEncoding.base64url)
+scalar base64urlBytes extends bytes;
+
 @error
 model WidgetError {
   @doc("The HTTP error code.")
@@ -54,7 +57,20 @@
 List all widgets in the system. This operation is not paginated, and returns a simple array of widgets.
 
 It does not accept any options or parameters.
 """)
-  @get listWidgets(): Widget[] | WidgetError;
+  @get listWidgets(
+    @header requiredHeader: string,
+    @header optionalHeader?: string,
+    @header nullableOptionalHeader?: string | null,
+    @header bytesHeader: bytes,
+    @header @encode(BytesKnownEncoding.base64) value: bytes,
+    @header({
+      format: "csv",
+    })
+    csvArrayHeader: base64urlBytes[],
+    @header utcDateHeader: utcDateTime,
+    @header optionalDateHeader?: utcDateTime,
+    @header nullableDateHeader?: utcDateTime | null,
+  ): Widget[] | WidgetError;
 
   @doc("Get a widget by ID.")
   @get getWidget(@path id: string): Widget | WidgetError;
diff --git a/packages/typespec-ts/package.json b/packages/typespec-ts/package.json
index 78572ed227..129461745c 100644
--- a/packages/typespec-ts/package.json
+++ b/packages/typespec-ts/package.json
@@ -24,8 +24,8 @@
     "generate-and-run:rlc": "npm run generate-tsp-only:rlc && npm run integration-test:alone:rlc && npm run stop-test-server",
     "generate-and-run:modular": "npm run generate-tsp-only:modular && npm run integration-test:alone:modular && npm run stop-test-server",
     "generate-tsp-only": "npm run generate-tsp-only:rlc && npm run generate-tsp-only:modular",
-    "generate-tsp-only:rlc": "ts-node ./test/commands/gen-cadl-ranch.ts --tag=rlc",
-    "generate-tsp-only:modular": "ts-node ./test/commands/gen-cadl-ranch.ts --tag=modular",
+    "generate-tsp-only:rlc": "node --loader ts-node/esm ./test/commands/gen-cadl-ranch.ts --tag=rlc",
+    "generate-tsp-only:modular": "node --loader ts-node/esm ./test/commands/gen-cadl-ranch.ts --tag=modular",
     "integration-test:alone": "npm run integration-test:alone:rlc && npm run integration-test:alone:modular",
     "integration-test:alone:rlc": "cross-env TS_NODE_PROJECT=tsconfig.integration.json mocha -r ts-node/register --experimental-specifier-resolution=node --timeout 4000 ./test/integration/*.spec.ts",
     "integration-test:alone:modular": "cross-env TS_NODE_PROJECT=tsconfig.integration.json mocha -r ts-node/register --experimental-specifier-resolution=node --timeout 4000 ./test/modularIntegration/*.spec.ts",
diff --git a/packages/typespec-ts/src/lib.ts b/packages/typespec-ts/src/lib.ts
index c7f4cee832..7aca167249 100644
--- a/packages/typespec-ts/src/lib.ts
+++ b/packages/typespec-ts/src/lib.ts
@@ -182,6 +182,13 @@ const libDef = {
         default:
           "Please provide credential scopes to ensure the token credential signature can be generated."
       }
+    },
+    "nullable-required-header": {
+      severity: "warning",
+      messages: {
+        default:
+          "Required header cannot be nullable. Please remove the nullable modifier."
+      }
     }
   },
   emitter: {
diff --git a/packages/typespec-ts/src/modular/buildOperations.ts b/packages/typespec-ts/src/modular/buildOperations.ts
index d781c61834..94eef7c423 100644
--- a/packages/typespec-ts/src/modular/buildOperations.ts
+++ b/packages/typespec-ts/src/modular/buildOperations.ts
@@ -76,6 +76,7 @@ export function buildOperationFiles(
   operationGroup.operations.forEach((o) => {
     const operationDeclaration = getOperationFunction(o, clientType);
     const sendOperationDeclaration = getSendPrivateFunction(
+      dpgContext,
       o,
       clientType,
       importSet
diff --git a/packages/typespec-ts/src/modular/helpers/operationHelpers.ts b/packages/typespec-ts/src/modular/helpers/operationHelpers.ts
index 4ae8e43013..5d78f5dec4 100644
--- a/packages/typespec-ts/src/modular/helpers/operationHelpers.ts
+++ b/packages/typespec-ts/src/modular/helpers/operationHelpers.ts
@@ -29,6 +29,9 @@ import {
   getCollectionFormatHelper,
   hasCollectionFormatInfo
 } from "../../utils/operationUtil.js";
+import { SdkContext } from "@azure-tools/typespec-client-generator-core";
+import { Program, NoTarget } from "@typespec/compiler";
+import { reportDiagnostic } from "../../lib.js";
 
 function getRLCResponseType(rlcResponse?: OperationResponse) {
   if (!rlcResponse?.responses) {
@@ -48,6 +51,7 @@
 }
 
 export function getSendPrivateFunction(
+  dpgContext: SdkContext,
   operation: Operation,
   clientType: string,
   importSet: Map>
@@ -71,6 +75,7 @@
     `return context.path("${operationPath}", ${getPathParameters(
       operation
     )}).${operationMethod}({...operationOptionsToRequestParameters(options), ${getRequestParameters(
+      dpgContext,
       operation,
       importSet
     )}});`
@@ -284,6 +289,7 @@ export function getOperationOptionsName(
  * Figuring out what goes in headers, body, path and qsp.
  */
 function getRequestParameters(
+  dpgContext: SdkContext,
   operation: Operation,
   importSet: Map>
 ): string {
@@ -298,7 +304,7 @@
 
   const parametersImplementation: Record<
     "header" | "query" | "body",
-    string[]
+    { paramMap: string; param: Parameter }[]
   > = {
     header: [],
     query: [],
@@ -311,9 +317,10 @@
       param.location === "query" ||
       param.location === "body"
     ) {
-      parametersImplementation[param.location].push(
-        getParameterMap(param, importSet)
-      );
+      parametersImplementation[param.location].push({
+        paramMap: getParameterMap(param, importSet),
+        param
+      });
     }
   }
 
@@ -324,23 +331,23 @@
   }
 
   if (parametersImplementation.header.length) {
-    paramStr = `${paramStr}\nheaders: {${parametersImplementation.header.join(
-      ",\n"
-    )}},`;
+    paramStr = `${paramStr}\nheaders: {${parametersImplementation.header
+      .map((i) => buildHeaderParameter(dpgContext.program, i.paramMap, i.param))
+      .join(",\n")}},`;
   }
 
   if (parametersImplementation.query.length) {
-    paramStr = `${paramStr}\nqueryParameters: {${parametersImplementation.query.join(
-      ",\n"
-    )}},`;
+    paramStr = `${paramStr}\nqueryParameters: {${parametersImplementation.query
+      .map((i) => i.paramMap)
+      .join(",\n")}},`;
   }
 
   if (
     operation.bodyParameter === undefined &&
     parametersImplementation.body.length
   ) {
-    paramStr = `${paramStr}\nbody: {${parametersImplementation.body.join(
-      ",\n"
-    )}}`;
+    paramStr = `${paramStr}\nbody: {${parametersImplementation.body
+      .map((i) => i.paramMap)
+      .join(",\n")}}`;
   } else if (operation.bodyParameter !== undefined) {
     paramStr = `${paramStr}${buildBodyParameter(
       operation.bodyParameter,
@@ -350,6 +357,31 @@
   return paramStr;
 }
 
+// Specially handle the type for headers because we only allow string/number/boolean values
+function buildHeaderParameter(
+  program: Program,
+  paramMap: string,
+  param: Parameter
+): string {
+  if (!param.optional && param.type.nullable === true) {
+    reportDiagnostic(program, {
+      code: "nullable-required-header",
+      target: NoTarget
+    });
+    return paramMap;
+  }
+  const conditions = [];
+  if (param.optional) {
+    conditions.push(`options?.${param.clientName} !== undefined`);
+  }
+  if (param.type.nullable === true) {
+    conditions.push(`options?.${param.clientName} !== null`);
+  }
+  return conditions.length > 0
+    ? `...(${conditions.join(" && ")} ? {${paramMap}} : {})`
+    : paramMap;
+}
+
 function buildBodyParameter(
   bodyParameter: BodyParameter | undefined,
   importSet: Map>
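`buildHeaderParameter` only manipulates strings, so its output is easy to verify by hand. A standalone mirror of its guard logic, using a trimmed-down stand-in for the emitter's `Parameter` shape (illustrative harness, not emitter code):

```typescript
// Only the fields buildHeaderParameter actually reads are modeled here.
type ParamLike = {
  clientName: string;
  optional?: boolean;
  type: { nullable?: boolean };
};

function emitted(paramMap: string, param: ParamLike): string {
  const conditions: string[] = [];
  if (param.optional) {
    conditions.push(`options?.${param.clientName} !== undefined`);
  }
  if (param.type.nullable === true) {
    conditions.push(`options?.${param.clientName} !== null`);
  }
  return conditions.length > 0
    ? `...(${conditions.join(" && ")} ? {${paramMap}} : {})`
    : paramMap;
}

// Required, non-nullable: passed through unchanged.
emitted(`"required-header": requiredHeader`, {
  clientName: "requiredHeader",
  type: {},
});
// => '"required-header": requiredHeader'

// Optional and nullable: both guards are combined with &&.
emitted(`"nullable-optional-header": options?.nullableOptionalHeader`, {
  clientName: "nullableOptionalHeader",
  optional: true,
  type: { nullable: true },
});
// => '...(options?.nullableOptionalHeader !== undefined &&
//     options?.nullableOptionalHeader !== null
//     ? {"nullable-optional-header": options?.nullableOptionalHeader} : {})'
```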
diff --git a/packages/typespec-ts/test/integration/generated/authentication/apiKey/src/authApiKeyClient.ts b/packages/typespec-ts/test/integration/generated/authentication/apiKey/src/authApiKeyClient.ts
index 68d432aae9..ba9999ee77 100644
--- a/packages/typespec-ts/test/integration/generated/authentication/apiKey/src/authApiKeyClient.ts
+++ b/packages/typespec-ts/test/integration/generated/authentication/apiKey/src/authApiKeyClient.ts
@@ -17,13 +17,6 @@ export default function createClient(
 ): AuthApiKeyClient {
   const baseUrl = options.baseUrl ?? `http://localhost:3000`;
   options.apiVersion = options.apiVersion ?? "1.0.0";
-  options = {
-    ...options,
-    credentials: {
-      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key",
-    },
-  };
-
   const userAgentInfo = `azsdk-js-auth-apikey-rest/1.0.0`;
   const userAgentPrefix =
     options.userAgentOptions && options.userAgentOptions.userAgentPrefix
@@ -37,6 +30,9 @@
     loggingOptions: {
       logger: options.loggingOptions?.logger ?? logger.info,
     },
+    credentials: {
+      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key",
+    },
   };
 
   const client = getClient(baseUrl, credentials, options) as AuthApiKeyClient;
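The relocation in these client factories is behavior-preserving: the credentials defaults are now applied as part of the single consolidated options merge instead of mutating `options` up front, and the `??` fallbacks still let caller-supplied values win. A sketch against the apiKey test client above (the credential value is invented; the import path matches the generated file):

```typescript
import { AzureKeyCredential } from "@azure/core-auth";
import createClient from "./authApiKeyClient.js";

// Default header name ("x-ms-api-key") is used when the caller passes nothing:
const client = createClient(new AzureKeyCredential("secret"));

// A caller-provided header name still takes precedence over the default, because
// the merge uses `options.credentials?.apiKeyHeaderName ?? "x-ms-api-key"`:
const custom = createClient(new AzureKeyCredential("secret"), {
  credentials: { apiKeyHeaderName: "my-api-key" },
});
```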
"x-ms-api-key", - }, - }; - const userAgentInfo = `azsdk-js-auth-apikey-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -37,6 +30,9 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key", + }, }; const client = getClient(baseUrl, credentials, options) as AuthApiKeyClient; diff --git a/packages/typespec-ts/test/integration/generated/authentication/oauth2/src/authOauth2Client.ts b/packages/typespec-ts/test/integration/generated/authentication/oauth2/src/authOauth2Client.ts index adf9f91912..1e309b58a4 100644 --- a/packages/typespec-ts/test/integration/generated/authentication/oauth2/src/authOauth2Client.ts +++ b/packages/typespec-ts/test/integration/generated/authentication/oauth2/src/authOauth2Client.ts @@ -17,15 +17,6 @@ export default function createClient( ): AuthOauth2Client { const baseUrl = options.baseUrl ?? `http://localhost:3000`; options.apiVersion = options.apiVersion ?? "1.0.0"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://security.microsoft.com/.default", - ], - }, - }; - const userAgentInfo = `azsdk-js-auth-oauth2-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -39,6 +30,11 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://security.microsoft.com/.default", + ], + }, }; const client = getClient(baseUrl, credentials, options) as AuthOauth2Client; diff --git a/packages/typespec-ts/test/integration/generated/authentication/union/src/authUnionClient.ts b/packages/typespec-ts/test/integration/generated/authentication/union/src/authUnionClient.ts index 7911f656fa..37243bd928 100644 --- a/packages/typespec-ts/test/integration/generated/authentication/union/src/authUnionClient.ts +++ b/packages/typespec-ts/test/integration/generated/authentication/union/src/authUnionClient.ts @@ -17,16 +17,6 @@ export default function createClient( ): AuthUnionClient { const baseUrl = options.baseUrl ?? `http://localhost:3000`; options.apiVersion = options.apiVersion ?? "1.0.0"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://security.microsoft.com/.default", - ], - apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key", - }, - }; - const userAgentInfo = `azsdk-js-auth-union-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -40,6 +30,12 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://security.microsoft.com/.default", + ], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? 
"x-ms-api-key", + }, }; const client = getClient(baseUrl, credentials, options) as AuthUnionClient; diff --git a/packages/typespec-ts/test/integration/generated/client/structure/default/src/serviceClient.ts b/packages/typespec-ts/test/integration/generated/client/structure/default/src/serviceClient.ts index 2662d65e84..6e0c38bc45 100644 --- a/packages/typespec-ts/test/integration/generated/client/structure/default/src/serviceClient.ts +++ b/packages/typespec-ts/test/integration/generated/client/structure/default/src/serviceClient.ts @@ -19,7 +19,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`; options.apiVersion = options.apiVersion ?? "1.0.0"; - const userAgentInfo = `azsdk-js-client-structure-default-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/integration/generated/client/structure/multi-client/src/serviceClient.ts b/packages/typespec-ts/test/integration/generated/client/structure/multi-client/src/serviceClient.ts index d8f9abc2ba..9b0a4bdd5c 100644 --- a/packages/typespec-ts/test/integration/generated/client/structure/multi-client/src/serviceClient.ts +++ b/packages/typespec-ts/test/integration/generated/client/structure/multi-client/src/serviceClient.ts @@ -19,7 +19,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`; options.apiVersion = options.apiVersion ?? "1.0.0"; - const userAgentInfo = `azsdk-js-client-structure-multiclient-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/integration/generated/client/structure/renamed-operation/src/serviceClient.ts b/packages/typespec-ts/test/integration/generated/client/structure/renamed-operation/src/serviceClient.ts index ae797824b4..cabd64afbe 100644 --- a/packages/typespec-ts/test/integration/generated/client/structure/renamed-operation/src/serviceClient.ts +++ b/packages/typespec-ts/test/integration/generated/client/structure/renamed-operation/src/serviceClient.ts @@ -19,7 +19,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`; options.apiVersion = options.apiVersion ?? "1.0.0"; - const userAgentInfo = `azsdk-js-client-structure-renamed-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/integration/generated/client/structure/two-operation-group/src/serviceClient.ts b/packages/typespec-ts/test/integration/generated/client/structure/two-operation-group/src/serviceClient.ts index 3d11d23464..6413714911 100644 --- a/packages/typespec-ts/test/integration/generated/client/structure/two-operation-group/src/serviceClient.ts +++ b/packages/typespec-ts/test/integration/generated/client/structure/two-operation-group/src/serviceClient.ts @@ -19,7 +19,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`; options.apiVersion = options.apiVersion ?? 
"1.0.0"; - const userAgentInfo = `azsdk-js-client-structure-twoopgroup-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/integration/generated/server/path/single/src/singleParamInServerPathClient.ts b/packages/typespec-ts/test/integration/generated/server/path/single/src/singleParamInServerPathClient.ts index eee12768e2..3ae937cd37 100644 --- a/packages/typespec-ts/test/integration/generated/server/path/single/src/singleParamInServerPathClient.ts +++ b/packages/typespec-ts/test/integration/generated/server/path/single/src/singleParamInServerPathClient.ts @@ -16,7 +16,6 @@ export default function createClient( ): SingleParamInServerPathClient { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? "1.0.0"; - const userAgentInfo = `azsdk-js-singleparam-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/modularIntegration/generated/authentication/api-key/src/rest/apiKeyClient.ts b/packages/typespec-ts/test/modularIntegration/generated/authentication/api-key/src/rest/apiKeyClient.ts index 6d4fba934f..1f6424abce 100644 --- a/packages/typespec-ts/test/modularIntegration/generated/authentication/api-key/src/rest/apiKeyClient.ts +++ b/packages/typespec-ts/test/modularIntegration/generated/authentication/api-key/src/rest/apiKeyClient.ts @@ -17,13 +17,6 @@ export default function createClient( ): ApiKeyContext { const baseUrl = options.baseUrl ?? `http://localhost:3000`; options.apiVersion = options.apiVersion ?? "1.0.0"; - options = { - ...options, - credentials: { - apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key", - }, - }; - const userAgentInfo = `azsdk-js-azure-api-key-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -37,6 +30,9 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key", + }, }; const client = getClient(baseUrl, credentials, options) as ApiKeyContext; diff --git a/packages/typespec-ts/test/modularIntegration/generated/authentication/oauth2/src/rest/oAuth2Client.ts b/packages/typespec-ts/test/modularIntegration/generated/authentication/oauth2/src/rest/oAuth2Client.ts index 8cc88bd378..8d80f26385 100644 --- a/packages/typespec-ts/test/modularIntegration/generated/authentication/oauth2/src/rest/oAuth2Client.ts +++ b/packages/typespec-ts/test/modularIntegration/generated/authentication/oauth2/src/rest/oAuth2Client.ts @@ -17,15 +17,6 @@ export default function createClient( ): OAuth2Context { const baseUrl = options.baseUrl ?? `http://localhost:3000`; options.apiVersion = options.apiVersion ?? "1.0.0"; - options = { - ...options, - credentials: { - scopes: options.credentials?.scopes ?? [ - "https://security.microsoft.com/.default", - ], - }, - }; - const userAgentInfo = `azsdk-js-azure-oauth2-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix @@ -39,6 +30,11 @@ export default function createClient( loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info, }, + credentials: { + scopes: options.credentials?.scopes ?? 
diff --git a/packages/typespec-ts/test/modularIntegration/generated/authentication/union/src/rest/unionClient.ts b/packages/typespec-ts/test/modularIntegration/generated/authentication/union/src/rest/unionClient.ts
index a5a1e6f738..b2ada3754e 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/authentication/union/src/rest/unionClient.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/authentication/union/src/rest/unionClient.ts
@@ -17,16 +17,6 @@ export default function createClient(
 ): UnionContext {
   const baseUrl = options.baseUrl ?? `http://localhost:3000`;
   options.apiVersion = options.apiVersion ?? "1.0.0";
-  options = {
-    ...options,
-    credentials: {
-      scopes: options.credentials?.scopes ?? [
-        "https://security.microsoft.com/.default",
-      ],
-      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key",
-    },
-  };
-
   const userAgentInfo = `azsdk-js-azure-auth-union-rest/1.0.0-beta.1`;
   const userAgentPrefix =
     options.userAgentOptions && options.userAgentOptions.userAgentPrefix
@@ -40,6 +30,12 @@
     loggingOptions: {
       logger: options.loggingOptions?.logger ?? logger.info,
     },
+    credentials: {
+      scopes: options.credentials?.scopes ?? [
+        "https://security.microsoft.com/.default",
+      ],
+      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "x-ms-api-key",
+    },
   };
 
   const client = getClient(baseUrl, credentials, options) as UnionContext;
diff --git a/packages/typespec-ts/test/modularIntegration/generated/client/structure/default/src/rest/serviceClient.ts b/packages/typespec-ts/test/modularIntegration/generated/client/structure/default/src/rest/serviceClient.ts
index e4edfb4b08..156f6a6a31 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/client/structure/default/src/rest/serviceClient.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/client/structure/default/src/rest/serviceClient.ts
@@ -19,7 +19,6 @@ export default function createClient(
   const baseUrl =
     options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`;
   options.apiVersion = options.apiVersion ?? "1.0.0";
-
   const userAgentInfo = `azsdk-js-client-structure-default-rest/1.0.0`;
   const userAgentPrefix =
     options.userAgentOptions && options.userAgentOptions.userAgentPrefix
diff --git a/packages/typespec-ts/test/modularIntegration/generated/client/structure/multi-client/src/rest/serviceClient.ts b/packages/typespec-ts/test/modularIntegration/generated/client/structure/multi-client/src/rest/serviceClient.ts
index 0a14b756f2..6a3316263d 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/client/structure/multi-client/src/rest/serviceClient.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/client/structure/multi-client/src/rest/serviceClient.ts
@@ -19,7 +19,6 @@ export default function createClient(
   const baseUrl =
     options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`;
   options.apiVersion = options.apiVersion ?? "1.0.0";
-
   const userAgentInfo = `azsdk-js-client-structure-multiclient-rest/1.0.0`;
   const userAgentPrefix =
     options.userAgentOptions && options.userAgentOptions.userAgentPrefix
"1.0.0"; - const userAgentInfo = `azsdk-js-client-structure-multiclient-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/modularIntegration/generated/client/structure/renamed-operation/src/rest/serviceClient.ts b/packages/typespec-ts/test/modularIntegration/generated/client/structure/renamed-operation/src/rest/serviceClient.ts index c918dad361..9e5d8344b7 100644 --- a/packages/typespec-ts/test/modularIntegration/generated/client/structure/renamed-operation/src/rest/serviceClient.ts +++ b/packages/typespec-ts/test/modularIntegration/generated/client/structure/renamed-operation/src/rest/serviceClient.ts @@ -19,7 +19,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`; options.apiVersion = options.apiVersion ?? "1.0.0"; - const userAgentInfo = `azsdk-js-client-structure-renamed-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/modularIntegration/generated/client/structure/two-operation-group/src/rest/serviceClient.ts b/packages/typespec-ts/test/modularIntegration/generated/client/structure/two-operation-group/src/rest/serviceClient.ts index c2063d5869..5328790109 100644 --- a/packages/typespec-ts/test/modularIntegration/generated/client/structure/two-operation-group/src/rest/serviceClient.ts +++ b/packages/typespec-ts/test/modularIntegration/generated/client/structure/two-operation-group/src/rest/serviceClient.ts @@ -19,7 +19,6 @@ export default function createClient( const baseUrl = options.baseUrl ?? `${endpoint}/client/structure/${clientParam}`; options.apiVersion = options.apiVersion ?? "1.0.0"; - const userAgentInfo = `azsdk-js-client-structure-twoopgroup-rest/1.0.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/modularIntegration/generated/server/path/single/src/rest/singleClient.ts b/packages/typespec-ts/test/modularIntegration/generated/server/path/single/src/rest/singleClient.ts index 9a23f0f517..a1d575feaf 100644 --- a/packages/typespec-ts/test/modularIntegration/generated/server/path/single/src/rest/singleClient.ts +++ b/packages/typespec-ts/test/modularIntegration/generated/server/path/single/src/rest/singleClient.ts @@ -16,7 +16,6 @@ export default function createClient( ): SingleContext { const baseUrl = options.baseUrl ?? `${endpoint}`; options.apiVersion = options.apiVersion ?? 
"1.0.0"; - const userAgentInfo = `azsdk-js-singleparam-rest/1.0.0-beta.1`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix diff --git a/packages/typespec-ts/test/modularUnit/operations.spec.ts b/packages/typespec-ts/test/modularUnit/operations.spec.ts new file mode 100644 index 0000000000..79f5da55b7 --- /dev/null +++ b/packages/typespec-ts/test/modularUnit/operations.spec.ts @@ -0,0 +1,162 @@ +import { assert } from "chai"; +import { emitModularOperationsFromTypeSpec } from "../util/emitUtil.js"; +import { assertEqualContent } from "../util/testUtil.js"; +import { Diagnostic } from "@typespec/compiler"; + +describe("operations", () => { + it("required & optional & nullable headers", async () => { + const tspContent = ` + model Bar { + prop1: string; + prop2: int64; + } + @encode(BytesKnownEncoding.base64url) + scalar base64urlBytes extends bytes; + op read( + @header requiredHeader: string, + @header optionalHeader?: string, + @header nullableOptionalHeader?: string | null, + @header bytesHeader: bytes, + @header @encode(BytesKnownEncoding.base64) value: bytes, + @header({ + format: "csv", + }) + csvArrayHeader: base64urlBytes[], + @header utcDateHeader: utcDateTime, + @header optionalDateHeader?: utcDateTime, + @header nullableDateHeader?: utcDateTime | null, + ...Bar): OkResponse; + `; + + const operationFiles = await emitModularOperationsFromTypeSpec(tspContent); + assert.ok(operationFiles); + assert.equal(operationFiles?.length, 1); + // console.log(operationFiles?.[0]?.getFullText()!); + assertEqualContent( + operationFiles?.[0]?.getFullText()!, + ` + import { TestingContext as Client } from "../rest/index.js"; + import { StreamableMethod, operationOptionsToRequestParameters } from "@azure-rest/core-client"; + import { uint8ArrayToString } from "@azure/core-util"; + + export function _readSend( + context: Client, + requiredHeader: string, + bytesHeader: Uint8Array, + value: Uint8Array, + csvArrayHeader: Uint8Array[], + utcDateHeader: Date, + body: Bar, + options: ReadOptions = { requestOptions: {} }): StreamableMethod { + return context.path("/", ).post({...operationOptionsToRequestParameters(options), + headers: { + "required-header": requiredHeader, + ...(options?.optionalHeader !== undefined + ? { "optional-header": options?.optionalHeader } + : {}), + ...(options?.nullableOptionalHeader !== undefined && options?.nullableOptionalHeader !== null + ? { "nullable-optional-header": options?.nullableOptionalHeader } + : {}), + "bytes-header": uint8ArrayToString(bytesHeader, "base64"), + value: uint8ArrayToString(value, "base64"), + "csv-array-header": buildCsvCollection( + (csvArrayHeader ?? []).map((p) => uint8ArrayToString(p, "base64url")) + ), + "utc-date-header": utcDateHeader.toUTCString(), + ...(options?.optionalDateHeader !== undefined + ? { + "optional-date-header": + options?.optionalDateHeader?.toUTCString(), + } + : {}), + ...(options?.nullableDateHeader !== undefined && options?.nullableDateHeader !== null + ? 
+
+  it("required nullable header would report diagnostic", async () => {
+    try {
+      const tspContent = `
+        op read( @header nullableRequiredHeader: string | null): OkResponse;
+      `;
+
+      await emitModularOperationsFromTypeSpec(tspContent, true);
+      assert.fail("Should throw diagnostic warnings");
+    } catch (e) {
+      const diagnostics = e as Diagnostic[];
+      assert.equal(diagnostics.length, 1);
+      assert.equal(
+        diagnostics[0]?.code,
+        "@azure-tools/typespec-ts/nullable-required-header"
+      );
+      assert.equal(diagnostics[0]?.severity, "warning");
+    }
+  });
+
+  it("should generate code for required nullable header", async () => {
+    const tspContent = `
+      op read( @header nullableRequiredHeader: string | null): OkResponse;
+    `;
+    const operationFiles = await emitModularOperationsFromTypeSpec(
+      tspContent,
+      false
+    );
+    assert.ok(operationFiles);
+    assert.equal(operationFiles?.length, 1);
+    // console.log(operationFiles?.[0]?.getFullText()!);
+    assertEqualContent(
+      operationFiles?.[0]?.getFullText()!,
+      `
+      import { TestingContext as Client } from "../rest/index.js";
+      import { StreamableMethod, operationOptionsToRequestParameters } from "@azure-rest/core-client";
+
+      export function _readSend(context: Client, nullableRequiredHeader: (string | null), options: ReadOptions = { requestOptions: {} }): StreamableMethod {
+        return context.path("/", ).get({...operationOptionsToRequestParameters(options),
+          headers: {"nullable-required-header": nullableRequiredHeader},});
+      }
+
+      export async function _readDeserialize(result: Read200Response): Promise {
+        if(result.status !== "200"){
+          throw result.body
+        }
+
+        return;
+      }
+
+      export async function read(context: Client, nullableRequiredHeader: (string | null), options: ReadOptions = { requestOptions: {} }): Promise {
+        const result = await _readSend(context, nullableRequiredHeader, options);
+        return _readDeserialize(result);
+      }
+      `
+    );
+  });
+});
diff --git a/packages/typespec-ts/test/unit/apiVersion.spec.ts b/packages/typespec-ts/test/unit/apiVersion.spec.ts
index ba9de5adca..97898a407a 100644
--- a/packages/typespec-ts/test/unit/apiVersion.spec.ts
+++ b/packages/typespec-ts/test/unit/apiVersion.spec.ts
@@ -144,7 +144,6 @@ const buildDefaultReturn = (hasDefault: boolean) => {
   ): testClient {
     const baseUrl = options.baseUrl ?? \`\${endpoint}/language\`;
     ${defaultDef}
-
     const userAgentInfo = \`azsdk-js-test-rest/1.0.0-beta.1\`;
     const userAgentPrefix =
       options.userAgentOptions && options.userAgentOptions.userAgentPrefix
diff --git a/packages/typespec-ts/test/unit/clientFactoryGenerator.spec.ts b/packages/typespec-ts/test/unit/clientFactoryGenerator.spec.ts
index 92730e4a3f..f227b77f21 100644
--- a/packages/typespec-ts/test/unit/clientFactoryGenerator.spec.ts
+++ b/packages/typespec-ts/test/unit/clientFactoryGenerator.spec.ts
@@ -364,21 +364,21 @@
   });
 
   describe("should handle different auth options", () => {
-    it("should not generate credential if scope is empty", async () => {
+    it("should report a diagnostic warning if scope is empty", async () => {
       try {
         await emitClientFactoryFromTypeSpec(
           `
-        @useAuth(
-          OAuth2Auth<[{
-            type: OAuth2FlowType.implicit,
-            authorizationUrl: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
-            scopes: []
-          }]>)
-        @service( {title: "PetStoreClient"})
-        namespace PetStore;
-        `,
+          @useAuth(
+            OAuth2Auth<[{
+              type: OAuth2FlowType.implicit,
+              authorizationUrl: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
+              scopes: []
+            }]>)
+          @service( {title: "PetStoreClient"})
+          namespace PetStore;
+          `,
           false,
-          false
+          true
         );
         assert.fail("Should throw diagnostic errors");
       } catch (e) {
@@ -391,6 +391,64 @@
       }
     });
 
+    it("should generate TokenCredential if scope is empty", async () => {
+      const factoryFile = await emitClientFactoryFromTypeSpec(
+        `
+        @useAuth(
+          OAuth2Auth<[{
+            type: OAuth2FlowType.implicit,
+            authorizationUrl: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
+            scopes: []
+          }]>)
+        @service( {title: "PetStoreClient"})
+        namespace PetStore;
+        `,
+        false,
+        false
+      );
+
+      assert.ok(factoryFile);
+      // console.log(factoryFile!.content);
+      assertEqualContent(
+        factoryFile!.content,
+        `
+        import { getClient, ClientOptions } from "@azure-rest/core-client";
+        import { logger } from "./logger";
+        import { TokenCredential } from "@azure/core-auth";
+        import { testClient } from "./clientDefinitions";
+
+        /**
+         * Initialize a new instance of \`testClient\`
+         * @param endpoint - The parameter endpoint
+         * @param credentials - uniquely identify client credential
+         * @param options - the parameter for all optional parameters
+         */
+        export default function createClient(endpoint: string, credentials: TokenCredential, options: ClientOptions = {}): testClient {
+          const baseUrl = options.baseUrl ?? \`\${endpoint}\`;
+
+          const userAgentInfo = \`azsdk-js-test-rest/1.0.0-beta.1\`;
+          const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? \`\${options.userAgentOptions.userAgentPrefix} \${userAgentInfo}\`: \`\${userAgentInfo}\`;;
+          options = {
+            ...options,
+            userAgentOptions: {
+              userAgentPrefix
+            },
+            loggingOptions: {
+              logger: options.loggingOptions?.logger ?? logger.info
+            },
+            credentials: {
+              scopes: options.credentials?.scopes ?? [\`\${baseUrl}/.default\`],
+            },
+          };
+
+          const client = getClient(baseUrl, credentials, options) as testClient;
+
+          return client;
+        }
+        `
+      );
+    });
+
     it("should generate both credentials if both defined", async () => {
       const models = await emitClientFactoryFromTypeSpec(`
         @useAuth(
@@ -420,14 +478,6 @@
        */
       export default function createClient(endpoint: string, credentials: TokenCredential | KeyCredential, options: ClientOptions = {}): testClient {
         const baseUrl = options.baseUrl ?? \`\${endpoint}\`;
-
-        options = {
-          ...options,
-          credentials: {
-            scopes: options.credentials?.scopes ?? ["https://petstor.com/default"],
-            apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "apiKey",
-          },
-        };
 
         const userAgentInfo = \`azsdk-js-test-rest/1.0.0-beta.1\`;
         const userAgentPrefix =
@@ -442,6 +492,10 @@
         loggingOptions: {
           logger: options.loggingOptions?.logger ?? logger.info
         },
+        credentials: {
+          scopes: options.credentials?.scopes ?? ["https://petstor.com/default"],
+          apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "apiKey",
+        },
       };
 
       const client = getClient(baseUrl, credentials, options) as testClient;
diff --git a/packages/typespec-ts/test/util/emitUtil.ts b/packages/typespec-ts/test/util/emitUtil.ts
index 7f01386237..7201a0e8d8 100644
--- a/packages/typespec-ts/test/util/emitUtil.ts
+++ b/packages/typespec-ts/test/util/emitUtil.ts
@@ -167,7 +167,7 @@ export async function emitClientDefinitionFromTypeSpec(
 export async function emitClientFactoryFromTypeSpec(
   tspContent: string,
   needAzureCore: boolean = false,
-  isEmptyDiagnostic = true,
+  mustEmptyDiagnostic = true,
   withRawContent = false
 ) {
   const context = await rlcEmitterFor(
@@ -187,9 +187,7 @@
   if (clients && clients[0]) {
     apiVersionInfo = transformApiVersionInfo(clients[0], dpgContext, urlInfo);
   }
-  if (isEmptyDiagnostic) {
-    expectDiagnosticEmpty(dpgContext.program.diagnostics);
-  } else {
+  if (mustEmptyDiagnostic && dpgContext.program.diagnostics.length > 0) {
     throw dpgContext.program.diagnostics;
   }
@@ -292,7 +290,10 @@ export async function emitModularModelsFromTypeSpec(
   return undefined;
 }
 
-export async function emitModularOperationsFromTypeSpec(tspContent: string) {
+export async function emitModularOperationsFromTypeSpec(
+  tspContent: string,
+  mustEmptyDiagnostic = true
+) {
   const context = await rlcEmitterFor(tspContent);
   const dpgContext = createDpgContextTestHelper(context.program);
   const serviceNameToRlcModelsMap: Map = new Map<
@@ -320,15 +321,18 @@
     modularCodeModel.clients.length > 0 &&
     modularCodeModel.clients[0]
   ) {
-    return buildOperationFiles(
+    const res = buildOperationFiles(
       dpgContext,
       modularCodeModel,
       modularCodeModel.clients[0],
       false
     );
+    if (mustEmptyDiagnostic && dpgContext.program.diagnostics.length > 0) {
+      throw dpgContext.program.diagnostics;
+    }
+    return res;
   }
 }
-  expectDiagnosticEmpty(dpgContext.program.diagnostics);
   return undefined;
 }
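With the new mustEmptyDiagnostic flag, the emit helpers throw the raw diagnostics array rather than asserting it is empty, which is what the new specs lean on. A sketch of the resulting test-side pattern (the tsp snippet and the import path are arbitrary):

```typescript
import { Diagnostic } from "@typespec/compiler";
import { emitModularOperationsFromTypeSpec } from "./util/emitUtil.js";

async function demo() {
  const tsp = `op read(@header h: string | null): OkResponse;`;

  try {
    // Default (mustEmptyDiagnostic = true): any diagnostic aborts the emit...
    await emitModularOperationsFromTypeSpec(tsp);
  } catch (e) {
    // ...and the thrown value is the diagnostics array itself, not an Error.
    const diagnostics = e as Diagnostic[];
    console.log(diagnostics.map((d) => `${d.severity}: ${d.code}`));
  }

  // Passing false suppresses the throw, so generated files can still be inspected.
  return emitModularOperationsFromTypeSpec(tsp, false);
}
```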