diff --git a/sdk/batch/Azure.Compute.Batch/CHANGELOG.md b/sdk/batch/Azure.Compute.Batch/CHANGELOG.md index c265ccc6f617..cfb6ee18c931 100644 --- a/sdk/batch/Azure.Compute.Batch/CHANGELOG.md +++ b/sdk/batch/Azure.Compute.Batch/CHANGELOG.md @@ -1,14 +1,60 @@ # Release History -## 1.0.0-beta.4 (Unreleased) +## 1.0.0-beta.4 (2025-09-01) ### Features Added +Added Models: +- Added `BatchJobDefaultOrder` +- Added `BatchPoolIdentityReference` +- Added `DiskCustomerManagedKey` +- Added `DiskEncryptionSetParameters` +- Added `HostEndpointSettings` +- Added `HostEndpointSettingsModeTypes` +- Added `IPFamily` +- Added `IpTag` +- Added `ProxyAgentSettings` + +Added Properties: +- Added `ipv6Address` to `BatchNode` +- Added `Ipv6RemoteLoginIpAddress` and `Ipv6RemoteLoginPort` to `BatchNodeRemoteLoginSettings` +- Added `IpFamilies` and `IpTags` to `BatchPublicAddressConfiguration` +- Added `JobDefaultOrder` to `BatchTaskSchedulingPolicy` +- Added `ManagedDisk` to `DataDisk` +- Added `CustomerManagedKey` to `DiskEncryptionConfiguration` +- Added `DiskEncryptionSet` to `ManagedDisk` +- Added `DiskWithVMGuestStateValue` to `SecurityEncryptionTypes` +- Added `ProxyAgentSetting` to `SecurityProfile` + + ### Breaking Changes -### Bugs Fixed +Removed Certificate APIs: +- Removed `BatchClient.CreateCertificate` +- Removed `BatchClient.CancelCertificateDeletion` +- Removed `BatchClient.DeleteCertificate` +- Removed `BatchClient.GetCertificate` +- Removed `BatchClient.GetCertificates` + +Removed Models: +- Removed `BatchCertificate` +- Removed `BatchCertificateDeleteError` +- Removed `BatchCertificateFormat` +- Removed `BatchCertificateReference` +- Removed `BatchCertificateState` +- Removed `BatchCertificateStoreLocation` +- Removed `BatchCertificateVisibility` +- Removed `BatchNodeCommunicationMode` + +Removed Properties: +- Removed `CertificateReferences` from `BatchNode` +- Removed `ResourceTags` and `CertificateReferences` from `BatchPool` +- Removed `CertificateReferences`, 
`ResourceTags`, and `TargetNodeCommunicationMode` from `BatchPoolCreateOptions` +- Removed `CertificateReferences` and `TargetNodeCommunicationMode` from `BatchPoolReplaceOptions` +- Removed `CertificateReferences`, `ResourceTags`, and `TargetNodeCommunicationMode` from `BatchPoolSpecifications` +- Removed `CertificateReferences`, `ResourceTags`, and `TargetNodeCommunicationMode` from `BatchPoolUpdateOptions` +- Removed `CertificateReferences`, `ResourceTags`, and `TargetNodeCommunicationMode` from `ComputeBatchModelFactory` -### Other Changes ## 1.0.0-beta.3 (2025-06-19) diff --git a/sdk/batch/Azure.Compute.Batch/MigrationGuide.md b/sdk/batch/Azure.Compute.Batch/MigrationGuide.md index d1d2248040ad..c2d66d3b4825 100644 --- a/sdk/batch/Azure.Compute.Batch/MigrationGuide.md +++ b/sdk/batch/Azure.Compute.Batch/MigrationGuide.md @@ -72,13 +72,6 @@ Familiarity with the legacy client library is assumed. For those new to the Azur - [File Properties](#get-node-file-properties) - [GetRemoteLoginSettings](#getremoteloginsettings) - [UploadComputeNodeBatchServiceLogs](#uploadcomputenodebatchservicelogs) - - [Certificate Operations](#certificate-operations) - - [CreateCertificateFromCer](#createcertificatefromcer) - - [CreateCertificateFromPfx](#createcertificatefrompfx) - - [GetCertificate](#getcertificate) - - [ListCertificates](#listcertificates) - - [DeleteCertificate](#deletecertificate) - - [CancelDeleteCertificate](#canceldeletecertificate) - [Application Operations](#application-operations) - [GetApplicationSummary](#getapplicationsummary) - [ListApplicationSummaries](#listapplicationsummaries) @@ -522,15 +515,7 @@ BatchApplicationPackageReference[] batchApplicationPackageReferences = new Batch } }; -BatchCertificateReference[] certificateReferences = new BatchCertificateReference[] { - new BatchCertificateReference("thumbprint","thumbprintAlgorithm") - { - StoreLocation = "storeLocation", - StoreName = "storeName" - } -}; - -BatchPoolReplaceOptions replaceOptions = 
new BatchPoolReplaceOptions(certificateReferences, batchApplicationPackageReferences, metadataItems); +BatchPoolReplaceOptions replaceOptions = new BatchPoolReplaceOptions(batchApplicationPackageReferences, metadataItems); batchClient.ReplacePoolProperties("poolID", replaceOptions); ``` #### ResizePool @@ -1658,141 +1643,6 @@ UploadBatchServiceLogsOptions uploadBatchServiceLogsOptions = new UploadBatchSer UploadBatchServiceLogsResult uploadBatchServiceLogsResult = batchClient.UploadNodeLogs("poolId", "computeNodeId", uploadBatchServiceLogsOptions); ``` -### Certificate Operations - -> Note: Certificates has been [deprecated]. - -#### CreateCertificateFromCer - -Previously in `Microsoft.Azure.Batch` to create a Certificate from an existing cert you could call the `CreateCertificateFromCer` method with the path to the cert. - -``` C# -Certificate cerCertificate = batchClient.CertificateOperations.CreateCertificateFromCer("cerFilePath"); -``` - -With `Azure.Compute.Batch` call `CreateCertificate` with a `BatchCertificate` param to create a cert - -```C# Snippet:Batch_Migration_CreateCerCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); -byte[] certData = File.ReadAllBytes("certPath"); -BatchCertificate cerCertificate = new BatchCertificate("Thumbprint", "ThumbprintAlgorithm", BinaryData.FromBytes(certData)) -{ - CertificateFormat = BatchCertificateFormat.Cer, - Password = "", -}; - -Response response = batchClient.CreateCertificate(cerCertificate); -``` - -#### CreateCertificateFromPfx - -Previously in `Microsoft.Azure.Batch` to create a Certificate from an existing pfx cert you could call the `CreateCertificateFromPfx` method with the path to the cert. 
- -``` C# -Certificate pfxCertificate = batchClient.CertificateOperations.CreateCertificateFromPfx("pfxFilePath", "CertificatePassword"); -``` - -With `Azure.Compute.Batch` call `CreateCertificate` with a `BatchCertificate` param to create a cert - -```C# Snippet:Batch_Migration_CreatePfxCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -byte[] certData = File.ReadAllBytes("certPath"); -BatchCertificate cerCertificate = new BatchCertificate("Thumbprint", "ThumbprintAlgorithm", BinaryData.FromBytes(certData)) -{ - CertificateFormat = BatchCertificateFormat.Pfx, - Password = "password", -}; - -Response response = batchClient.CreateCertificate(cerCertificate); -``` - -#### GetCertificate - -Previously in `Microsoft.Azure.Batch` to get a Certificate you could call the `GetCertificate` method from `CertificateOperations`. - -``` C# -Certificate boundCert = batchClient.CertificateOperations.GetCertificate( "ThumbprintAlgorithm", "Thumbprint") -``` - -With `Azure.Compute.Batch` call `GetCertificate` to get the certificate which will return a `GetCertificateResponse`. - -```C# Snippet:Batch_Migration_GetCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -BatchCertificate cerCertificateResponse = batchClient.GetCertificate("ThumbprintAlgorithm", "Thumbprint"); -``` - -#### ListCertificates - -Previously in `Microsoft.Azure.Batch` to get a list of Certificates you could call the `ListCertificates` method from `CertificateOperations`. - -``` C# -foreach (Certificate curCert in batchClient.CertificateOperations.ListCertificates()) -{ - // do something -} -``` - -With `Azure.Compute.Batch` call `GetCertificates` to get a list of certificates. 
- -```C# Snippet:Batch_Migration_ListCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -foreach (BatchCertificate item in batchClient.GetCertificates()) -{ - // do something -} -``` - -#### DeleteCertificate - -Previously in `Microsoft.Azure.Batch` to delete a Certificate you could call the `DeleteCertificate` method from `CertificateOperations`. - -``` C# -batchClient.CertificateOperations.DeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); -``` - -With `Azure.Compute.Batch` call `DeleteCertificate` to delete a certificate. - -```C# Snippet:Batch_Migration_DeleteCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -batchClient.DeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); -``` -Optionally you can use the returned `DeleteCertificateOperation` object to wait for the operation to complete. - -```C# Snippet:Batch_Migration_DeleteCertificate_Operation -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -DeleteCertificateOperation operation = batchClient.DeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); - -// Optional, wait for operation to complete -operation.WaitForCompletion(); -``` -#### CancelDeleteCertificate - -Previously in `Microsoft.Azure.Batch` to cancel a delete of a Certificate you could call the `CancelDeleteCertificate` method from `CertificateOperations`. - -``` C# -batchClient.CertificateOperations.CancelDeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); -``` - -With `Azure.Compute.Batch` call `CancelCertificateDeletion` to cancel a delete of a certificate. 
- -```C# Snippet:Batch_Migration_CancelDeleteCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -batchClient.CancelCertificateDeletion("ThumbprintAlgorithm", "Thumbprint"); -``` - ### Application Operations #### GetApplicationSummary diff --git a/sdk/batch/Azure.Compute.Batch/README.md b/sdk/batch/Azure.Compute.Batch/README.md index 5ea6c14299ae..a6f2a414f9f3 100644 --- a/sdk/batch/Azure.Compute.Batch/README.md +++ b/sdk/batch/Azure.Compute.Batch/README.md @@ -147,12 +147,6 @@ The following section provides several synchronous code snippets covering some o * [Node File Properties](#get-node-file-properties) * [Get Remote Login Settings](#getremoteloginsettings) * [Upload Compute Node BatchService Logs](#uploadcomputenodebatchservicelogs) -* [Certificate Operations](#certificate-operations) - * [Create a Certificate](#createcertificate) - * [Get a Certificatec](#getcertificate) - * [List Certificates](#listcertificates) - * [Delete Certificate](#deletecertificate) - * [Cancel Delete Certificate](#canceldeletecertificate) * [Application Operations](#application-operations) * [Get Application](#get-application) * [List Applications](#list-application) @@ -330,15 +324,7 @@ BatchApplicationPackageReference[] batchApplicationPackageReferences = new Batch } }; -BatchCertificateReference[] certificateReferences = new BatchCertificateReference[] { - new BatchCertificateReference("thumbprint","thumbprintAlgorithm") - { - StoreLocation = "storeLocation", - StoreName = "storeName" - } -}; - -BatchPoolReplaceOptions replaceOptions = new BatchPoolReplaceOptions(certificateReferences, batchApplicationPackageReferences, metadataItems); +BatchPoolReplaceOptions replaceOptions = new BatchPoolReplaceOptions(batchApplicationPackageReferences, metadataItems); batchClient.ReplacePoolProperties("poolID", replaceOptions); ``` #### Resize Pool @@ -1159,85 +1145,6 @@ UploadBatchServiceLogsOptions 
uploadBatchServiceLogsOptions = new UploadBatchSer UploadBatchServiceLogsResult uploadBatchServiceLogsResult = batchClient.UploadNodeLogs("poolId", "computeNodeId", uploadBatchServiceLogsOptions); ``` -### Certificate Operations - -> Note: Certificates has been [deprecated]. - -#### CreateCertificate - -Call `CreateCertificate` with a `BatchCertificate` param to create a Certificate. - -```C# Snippet:Batch_Migration_CreateCerCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); -byte[] certData = File.ReadAllBytes("certPath"); -BatchCertificate cerCertificate = new BatchCertificate("Thumbprint", "ThumbprintAlgorithm", BinaryData.FromBytes(certData)) -{ - CertificateFormat = BatchCertificateFormat.Cer, - Password = "", -}; - -Response response = batchClient.CreateCertificate(cerCertificate); -``` - -#### GetCertificate - -Call `GetCertificate` to get the certificate which will return a `GetCertificateResponse`. - -```C# Snippet:Batch_Migration_GetCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -BatchCertificate cerCertificateResponse = batchClient.GetCertificate("ThumbprintAlgorithm", "Thumbprint"); -``` - -#### ListCertificates - -Call `GetCertificates` to get a list of certificates. - -```C# Snippet:Batch_Migration_ListCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -foreach (BatchCertificate item in batchClient.GetCertificates()) -{ - // do something -} -``` - -#### DeleteCertificate - -Call `DeleteCertificate` to delete a Certificate. 
- -```C# Snippet:Batch_Migration_DeleteCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -batchClient.DeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); -``` -Optionally you can use the returned `DeleteCertificateOperation` object to wait for the operation to complete. - -```C# Snippet:Batch_Migration_DeleteCertificate_Operation -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -DeleteCertificateOperation operation = batchClient.DeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); - -// Optional, wait for operation to complete -operation.WaitForCompletion(); -``` - -#### CancelDeleteCertificate - -Call `CancelCertificateDeletion` to cancel a delete of a certificate. - -```C# Snippet:Batch_Migration_CancelDeleteCertificate -BatchClient batchClient = new BatchClient( -new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - -batchClient.CancelCertificateDeletion("ThumbprintAlgorithm", "Thumbprint"); -``` - ### Application Operations #### Get Application diff --git a/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.net8.0.cs b/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.net8.0.cs index 2e1b5143f449..243bf5533fef 100644 --- a/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.net8.0.cs +++ b/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.net8.0.cs @@ -228,130 +228,6 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } - public partial class BatchCertificate : System.ClientModel.Primitives.IJsonModel, 
System.ClientModel.Primitives.IPersistableModel - { - public BatchCertificate(string thumbprint, string thumbprintAlgorithm, System.BinaryData data) { } - public Azure.Compute.Batch.BatchCertificateFormat? CertificateFormat { get { throw null; } set { } } - public System.BinaryData Data { get { throw null; } set { } } - public Azure.Compute.Batch.BatchCertificateDeleteError DeleteCertificateError { get { throw null; } } - public string Password { get { throw null; } set { } } - public Azure.Compute.Batch.BatchCertificateState? PreviousState { get { throw null; } } - public System.DateTimeOffset? PreviousStateTransitionTime { get { throw null; } } - public string PublicData { get { throw null; } } - public Azure.Compute.Batch.BatchCertificateState? State { get { throw null; } } - public System.DateTimeOffset? StateTransitionTime { get { throw null; } } - public string Thumbprint { get { throw null; } set { } } - public string ThumbprintAlgorithm { get { throw null; } set { } } - public System.Uri Uri { get { throw null; } } - protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificate System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificate System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - } - public partial class BatchCertificateDeleteError : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel - { - internal BatchCertificateDeleteError() { } - public string Code { get { throw null; } } - public string Message { get { throw null; } } - public System.Collections.Generic.IReadOnlyList Values { get { throw null; } } - protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateDeleteError System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateDeleteError System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateFormat : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchCertificateFormat(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateFormat Cer { get { throw null; } } - public static 
Azure.Compute.Batch.BatchCertificateFormat Pfx { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchCertificateFormat other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateFormat left, Azure.Compute.Batch.BatchCertificateFormat right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchCertificateFormat (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchCertificateFormat left, Azure.Compute.Batch.BatchCertificateFormat right) { throw null; } - public override string ToString() { throw null; } - } - public partial class BatchCertificateReference : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel - { - public BatchCertificateReference(string thumbprint, string thumbprintAlgorithm) { } - public Azure.Compute.Batch.BatchCertificateStoreLocation? 
StoreLocation { get { throw null; } set { } } - public string StoreName { get { throw null; } set { } } - public string Thumbprint { get { throw null; } set { } } - public string ThumbprintAlgorithm { get { throw null; } set { } } - public System.Collections.Generic.IList Visibility { get { throw null; } } - protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateReference System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateReference System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateState : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchCertificateState(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateState Active { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateState DeleteFailed { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateState Deleting { get { throw null; } } - public bool 
Equals(Azure.Compute.Batch.BatchCertificateState other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateState left, Azure.Compute.Batch.BatchCertificateState right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchCertificateState (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchCertificateState left, Azure.Compute.Batch.BatchCertificateState right) { throw null; } - public override string ToString() { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateStoreLocation : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchCertificateStoreLocation(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateStoreLocation CurrentUser { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateStoreLocation LocalMachine { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchCertificateStoreLocation other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateStoreLocation left, Azure.Compute.Batch.BatchCertificateStoreLocation right) { throw null; } - public static implicit operator 
Azure.Compute.Batch.BatchCertificateStoreLocation (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchCertificateStoreLocation left, Azure.Compute.Batch.BatchCertificateStoreLocation right) { throw null; } - public override string ToString() { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateVisibility : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchCertificateVisibility(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateVisibility RemoteUser { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateVisibility StartTask { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateVisibility Task { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchCertificateVisibility other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateVisibility left, Azure.Compute.Batch.BatchCertificateVisibility right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchCertificateVisibility (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchCertificateVisibility left, Azure.Compute.Batch.BatchCertificateVisibility right) { throw null; } - public override string ToString() { throw null; } - } public partial class BatchClient { protected BatchClient() { } @@ -360,12 +236,6 @@ public BatchClient(System.Uri endpoint, Azure.AzureNamedKeyCredential credential public BatchClient(System.Uri 
endpoint, Azure.Core.TokenCredential credential) { } public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, Azure.Compute.Batch.BatchClientOptions options) { } public virtual Azure.Core.Pipeline.HttpPipeline Pipeline { get { throw null; } } - public virtual Azure.Response CancelCertificateDeletion(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual System.Threading.Tasks.Task CancelCertificateDeletionAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual Azure.Response CreateCertificate(Azure.Compute.Batch.BatchCertificate certificate, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response CreateCertificate(Azure.Core.RequestContent content, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual System.Threading.Tasks.Task CreateCertificateAsync(Azure.Compute.Batch.BatchCertificate certificate, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task CreateCertificateAsync(Azure.Core.RequestContent content, System.TimeSpan? 
timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } public virtual Azure.Response CreateJob(Azure.Compute.Batch.BatchJobCreateOptions job, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateJob(Azure.Core.RequestContent content, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } public virtual System.Threading.Tasks.Task CreateJobAsync(Azure.Compute.Batch.BatchJobCreateOptions job, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -394,8 +264,6 @@ public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, A public virtual System.Threading.Tasks.Task CreateTasksAsync(string jobId, System.Collections.Generic.IEnumerable tasksToAdd, Azure.Compute.Batch.CreateTasksOptions createTasksOptions = null, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Compute.Batch.DeallocateNodeOperation DeallocateNode(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeDeallocateOptions parameters = null, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? 
ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task DeallocateNodeAsync(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeDeallocateOptions parameters = null, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Compute.Batch.DeleteCertificateOperation DeleteCertificate(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual System.Threading.Tasks.Task DeleteCertificateAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } public virtual Azure.Compute.Batch.DeleteJobOperation DeleteJob(string jobId, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), bool? force = default(bool?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } public virtual System.Threading.Tasks.Task DeleteJobAsync(string jobId, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), bool? force = default(bool?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } public virtual Azure.Compute.Batch.DeleteJobScheduleOperation DeleteJobSchedule(string jobScheduleId, System.TimeSpan? 
timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), bool? force = default(bool?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } @@ -442,14 +310,6 @@ public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, A public virtual Azure.Pageable GetApplications(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.AsyncPageable GetApplicationsAsync(System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, int? maxresults, Azure.RequestContext context) { throw null; } public virtual Azure.AsyncPageable GetApplicationsAsync(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response GetCertificate(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual Azure.Response GetCertificate(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? 
timeOutInSeconds, System.DateTimeOffset? ocpDate, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual System.Threading.Tasks.Task> GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Pageable GetCertificates(System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual Azure.Pageable GetCertificates(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.AsyncPageable GetCertificatesAsync(System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual Azure.AsyncPageable GetCertificatesAsync(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response GetJob(string jobId, System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? 
ocpDate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } public virtual Azure.Response GetJob(string jobId, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task GetJobAsync(string jobId, System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } @@ -613,10 +473,10 @@ public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, A } public partial class BatchClientOptions : Azure.Core.ClientOptions { - public BatchClientOptions(Azure.Compute.Batch.BatchClientOptions.ServiceVersion version = Azure.Compute.Batch.BatchClientOptions.ServiceVersion.V2024_07_01_20_0) { } + public BatchClientOptions(Azure.Compute.Batch.BatchClientOptions.ServiceVersion version = Azure.Compute.Batch.BatchClientOptions.ServiceVersion.V2025_06_01) { } public enum ServiceVersion { - V2024_07_01_20_0 = 1, + V2025_06_01 = 1, } } public partial class BatchContainerConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel @@ -963,6 +823,24 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchJobDefaultOrder : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchJobDefaultOrder(string value) { throw null; } + public static Azure.Compute.Batch.BatchJobDefaultOrder CreationTime { get { throw null; } } + public static Azure.Compute.Batch.BatchJobDefaultOrder None { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchJobDefaultOrder other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchJobDefaultOrder left, Azure.Compute.Batch.BatchJobDefaultOrder right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchJobDefaultOrder (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchJobDefaultOrder left, Azure.Compute.Batch.BatchJobDefaultOrder right) { throw null; } + public override string ToString() { throw null; } + } public partial class BatchJobDisableOptions : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public BatchJobDisableOptions(Azure.Compute.Batch.DisableBatchJobOption disableTasks) { } @@ -1411,11 +1289,11 @@ public partial class BatchNode : System.ClientModel.Primitives.IJsonModel CertificateReferences { get { throw null; } } public Azure.Compute.Batch.BatchNodeEndpointConfiguration EndpointConfiguration { get { throw null; } } public 
System.Collections.Generic.IReadOnlyList Errors { get { throw null; } } public string Id { get { throw null; } } public System.Net.IPAddress IpAddress { get { throw null; } } + public System.Net.IPAddress Ipv6Address { get { throw null; } } public bool? IsDedicated { get { throw null; } } public System.DateTimeOffset? LastBootTime { get { throw null; } } public Azure.Compute.Batch.BatchNodeAgentInfo NodeAgentInfo { get { throw null; } } @@ -1451,25 +1329,6 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchNodeCommunicationMode : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchNodeCommunicationMode(string value) { throw null; } - public static Azure.Compute.Batch.BatchNodeCommunicationMode Classic { get { throw null; } } - public static Azure.Compute.Batch.BatchNodeCommunicationMode Default { get { throw null; } } - public static Azure.Compute.Batch.BatchNodeCommunicationMode Simplified { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchNodeCommunicationMode other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchNodeCommunicationMode left, 
Azure.Compute.Batch.BatchNodeCommunicationMode right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchNodeCommunicationMode (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchNodeCommunicationMode left, Azure.Compute.Batch.BatchNodeCommunicationMode right) { throw null; } - public override string ToString() { throw null; } - } public partial class BatchNodeCounts : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { internal BatchNodeCounts() { } @@ -1755,6 +1614,8 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public partial class BatchNodeRemoteLoginSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { internal BatchNodeRemoteLoginSettings() { } + public System.Net.IPAddress Ipv6RemoteLoginIpAddress { get { throw null; } } + public int? Ipv6RemoteLoginPort { get { throw null; } } public System.Net.IPAddress RemoteLoginIpAddress { get { throw null; } } public int RemoteLoginPort { get { throw null; } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -1874,11 +1735,9 @@ internal BatchPool() { } public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } } public string AutoScaleFormula { get { throw null; } } public Azure.Compute.Batch.AutoScaleRun AutoScaleRun { get { throw null; } } - public System.Collections.Generic.IReadOnlyList CertificateReferences { get { throw null; } } public System.DateTimeOffset? CreationTime { get { throw null; } } public int? CurrentDedicatedNodes { get { throw null; } } public int? CurrentLowPriorityNodes { get { throw null; } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? CurrentNodeCommunicationMode { get { throw null; } } public string DisplayName { get { throw null; } } public bool? 
EnableAutoScale { get { throw null; } } public bool? EnableInterNodeCommunication { get { throw null; } } @@ -1892,13 +1751,11 @@ internal BatchPool() { } public Azure.Compute.Batch.BatchPoolStatistics PoolStatistics { get { throw null; } } public System.Collections.Generic.IReadOnlyList ResizeErrors { get { throw null; } } public System.TimeSpan? ResizeTimeout { get { throw null; } } - public System.Collections.Generic.IReadOnlyDictionary ResourceTags { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } } public Azure.Compute.Batch.BatchPoolState? State { get { throw null; } } public System.DateTimeOffset? StateTransitionTime { get { throw null; } } public int? TargetDedicatedNodes { get { throw null; } } public int? TargetLowPriorityNodes { get { throw null; } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } } public int? TaskSlotsPerNode { get { throw null; } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } } @@ -1919,7 +1776,6 @@ public BatchPoolCreateOptions(string id, string vmSize) { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } set { } } public string AutoScaleFormula { get { throw null; } set { } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public string DisplayName { get { throw null; } set { } } public bool? EnableAutoScale { get { throw null; } set { } } public bool? 
EnableInterNodeCommunication { get { throw null; } set { } } @@ -1928,11 +1784,9 @@ public BatchPoolCreateOptions(string id, string vmSize) { } public System.Collections.Generic.IList MountConfiguration { get { throw null; } } public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } } public System.TimeSpan? ResizeTimeout { get { throw null; } set { } } - public System.Collections.Generic.IDictionary ResourceTags { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } public int? TargetDedicatedNodes { get { throw null; } set { } } public int? TargetLowPriorityNodes { get { throw null; } set { } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } } public int? TaskSlotsPerNode { get { throw null; } set { } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } } @@ -1992,6 +1846,17 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + public partial class BatchPoolIdentityReference : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchPoolIdentityReference() { } + public Azure.Core.ResourceIdentifier ResourceId { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchPoolIdentityReference 
System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchPoolIdentityReference System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] public readonly partial struct BatchPoolIdentityType : System.IEquatable { @@ -2055,12 +1920,10 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer } public partial class BatchPoolReplaceOptions : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { - public BatchPoolReplaceOptions(System.Collections.Generic.IEnumerable certificateReferences, System.Collections.Generic.IEnumerable applicationPackageReferences, System.Collections.Generic.IEnumerable metadata) { } + public BatchPoolReplaceOptions(System.Collections.Generic.IEnumerable applicationPackageReferences, System.Collections.Generic.IEnumerable metadata) { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public System.Collections.Generic.IList Metadata { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } - 
public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.BatchPoolReplaceOptions System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -2111,7 +1974,6 @@ public BatchPoolSpecification(string vmSize) { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } set { } } public string AutoScaleFormula { get { throw null; } set { } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public string DisplayName { get { throw null; } set { } } public bool? EnableAutoScale { get { throw null; } set { } } public bool? EnableInterNodeCommunication { get { throw null; } set { } } @@ -2119,11 +1981,9 @@ public BatchPoolSpecification(string vmSize) { } public System.Collections.Generic.IList MountConfiguration { get { throw null; } } public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } } public System.TimeSpan? ResizeTimeout { get { throw null; } set { } } - public string ResourceTags { get { throw null; } set { } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } public int? TargetDedicatedNodes { get { throw null; } set { } } public int? TargetLowPriorityNodes { get { throw null; } set { } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? 
TargetNodeCommunicationMode { get { throw null; } set { } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } } public int? TaskSlotsPerNode { get { throw null; } set { } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } } @@ -2174,15 +2034,12 @@ public partial class BatchPoolUpdateOptions : System.ClientModel.Primitives.IJso { public BatchPoolUpdateOptions() { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public string DisplayName { get { throw null; } set { } } public bool? EnableInterNodeCommunication { get { throw null; } set { } } public System.Collections.Generic.IList Metadata { get { throw null; } } public System.Collections.Generic.IList MountConfiguration { get { throw null; } } public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } } - public System.Collections.Generic.IDictionary ResourceTags { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } } public int? TaskSlotsPerNode { get { throw null; } set { } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } } @@ -2229,6 +2086,8 @@ public partial class BatchPublicIpAddressConfiguration : System.ClientModel.Prim public BatchPublicIpAddressConfiguration() { } public System.Collections.Generic.IList IpAddressIds { get { throw null; } } public Azure.Compute.Batch.IpAddressProvisioningType? 
IpAddressProvisioningType { get { throw null; } set { } } + public System.Collections.Generic.IList IpFamilies { get { throw null; } } + public System.Collections.Generic.IList IpTags { get { throw null; } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.BatchPublicIpAddressConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -2637,6 +2496,7 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public partial class BatchTaskSchedulingPolicy : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public BatchTaskSchedulingPolicy(Azure.Compute.Batch.BatchNodeFillType nodeFillType) { } + public Azure.Compute.Batch.BatchJobDefaultOrder? 
JobDefaultOrder { get { throw null; } set { } } public Azure.Compute.Batch.BatchNodeFillType NodeFillType { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.BatchTaskSchedulingPolicy System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } @@ -2794,8 +2654,6 @@ public static partial class ComputeBatchModelFactory public static Azure.Compute.Batch.AutoScaleRun AutoScaleRun(System.DateTimeOffset timestamp = default(System.DateTimeOffset), string results = null, Azure.Compute.Batch.AutoScaleRunError error = null) { throw null; } public static Azure.Compute.Batch.AutoScaleRunError AutoScaleRunError(string code = null, string message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchApplication BatchApplication(string id = null, string displayName = null, System.Collections.Generic.IEnumerable versions = null) { throw null; } - public static Azure.Compute.Batch.BatchCertificate BatchCertificate(string thumbprint = null, string thumbprintAlgorithm = null, System.Uri uri = null, Azure.Compute.Batch.BatchCertificateState? state = default(Azure.Compute.Batch.BatchCertificateState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchCertificateState? previousState = default(Azure.Compute.Batch.BatchCertificateState?), System.DateTimeOffset? previousStateTransitionTime = default(System.DateTimeOffset?), string publicData = null, Azure.Compute.Batch.BatchCertificateDeleteError deleteCertificateError = null, System.BinaryData data = null, Azure.Compute.Batch.BatchCertificateFormat? 
certificateFormat = default(Azure.Compute.Batch.BatchCertificateFormat?), string password = null) { throw null; } - public static Azure.Compute.Batch.BatchCertificateDeleteError BatchCertificateDeleteError(string code = null, string message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchCreateTaskCollectionResult BatchCreateTaskCollectionResult(System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchError BatchError(string code = null, Azure.Compute.Batch.BatchErrorMessage message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchErrorDetail BatchErrorDetail(string key = null, string value = null) { throw null; } @@ -2812,18 +2670,18 @@ public static partial class ComputeBatchModelFactory public static Azure.Compute.Batch.BatchJobScheduleStatistics BatchJobScheduleStatistics(System.Uri uri = null, System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), System.TimeSpan userCpuTime = default(System.TimeSpan), System.TimeSpan kernelCpuTime = default(System.TimeSpan), System.TimeSpan wallClockTime = default(System.TimeSpan), long readIops = (long)0, long writeIops = (long)0, float readIoGiB = 0f, float writeIoGiB = 0f, long succeededTasksCount = (long)0, long failedTasksCount = (long)0, long taskRetriesCount = (long)0, System.TimeSpan waitTime = default(System.TimeSpan)) { throw null; } public static Azure.Compute.Batch.BatchJobSchedulingError BatchJobSchedulingError(Azure.Compute.Batch.BatchErrorSourceCategory category = default(Azure.Compute.Batch.BatchErrorSourceCategory), string code = null, string message = null, System.Collections.Generic.IEnumerable details = null) { throw null; } public static Azure.Compute.Batch.BatchJobStatistics BatchJobStatistics(System.Uri uri = null, System.DateTimeOffset 
startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), System.TimeSpan userCpuTime = default(System.TimeSpan), System.TimeSpan kernelCpuTime = default(System.TimeSpan), System.TimeSpan wallClockTime = default(System.TimeSpan), long readIops = (long)0, long writeIops = (long)0, float readIoGiB = 0f, float writeIoGiB = 0f, long succeededTasksCount = (long)0, long failedTasksCount = (long)0, long taskRetriesCount = (long)0, System.TimeSpan waitTime = default(System.TimeSpan)) { throw null; } - public static Azure.Compute.Batch.BatchNode BatchNode(string id = null, System.Uri uri = null, Azure.Compute.Batch.BatchNodeState? state = default(Azure.Compute.Batch.BatchNodeState?), Azure.Compute.Batch.SchedulingState? schedulingState = default(Azure.Compute.Batch.SchedulingState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), System.DateTimeOffset? lastBootTime = default(System.DateTimeOffset?), System.DateTimeOffset? allocationTime = default(System.DateTimeOffset?), System.Net.IPAddress ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = default(int?), int? runningTasksCount = default(int?), int? runningTaskSlotsCount = default(int?), int? totalTasksSucceeded = default(int?), System.Collections.Generic.IEnumerable recentTasks = null, Azure.Compute.Batch.BatchStartTask startTask = null, Azure.Compute.Batch.BatchStartTaskInfo startTaskInfo = null, System.Collections.Generic.IEnumerable certificateReferences = null, System.Collections.Generic.IEnumerable errors = null, bool? 
isDedicated = default(bool?), Azure.Compute.Batch.BatchNodeEndpointConfiguration endpointConfiguration = null, Azure.Compute.Batch.BatchNodeAgentInfo nodeAgentInfo = null, Azure.Compute.Batch.VirtualMachineInfo virtualMachineInfo = null) { throw null; } + public static Azure.Compute.Batch.BatchNode BatchNode(string id = null, System.Uri uri = null, Azure.Compute.Batch.BatchNodeState? state = default(Azure.Compute.Batch.BatchNodeState?), Azure.Compute.Batch.SchedulingState? schedulingState = default(Azure.Compute.Batch.SchedulingState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), System.DateTimeOffset? lastBootTime = default(System.DateTimeOffset?), System.DateTimeOffset? allocationTime = default(System.DateTimeOffset?), System.Net.IPAddress ipAddress = null, System.Net.IPAddress ipv6Address = null, string affinityId = null, string vmSize = null, int? totalTasksRun = default(int?), int? runningTasksCount = default(int?), int? runningTaskSlotsCount = default(int?), int? totalTasksSucceeded = default(int?), System.Collections.Generic.IEnumerable recentTasks = null, Azure.Compute.Batch.BatchStartTask startTask = null, Azure.Compute.Batch.BatchStartTaskInfo startTaskInfo = null, System.Collections.Generic.IEnumerable errors = null, bool? 
isDedicated = default(bool?), Azure.Compute.Batch.BatchNodeEndpointConfiguration endpointConfiguration = null, Azure.Compute.Batch.BatchNodeAgentInfo nodeAgentInfo = null, Azure.Compute.Batch.VirtualMachineInfo virtualMachineInfo = null) { throw null; } public static Azure.Compute.Batch.BatchNodeAgentInfo BatchNodeAgentInfo(string version = null, System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset)) { throw null; } public static Azure.Compute.Batch.BatchNodeCounts BatchNodeCounts(int creating = 0, int idle = 0, int offline = 0, int preempted = 0, int rebooting = 0, int reimaging = 0, int running = 0, int starting = 0, int startTaskFailed = 0, int leavingPool = 0, int unknown = 0, int unusable = 0, int waitingForStartTask = 0, int deallocated = 0, int deallocating = 0, int total = 0, int upgradingOs = 0) { throw null; } public static Azure.Compute.Batch.BatchNodeEndpointConfiguration BatchNodeEndpointConfiguration(System.Collections.Generic.IEnumerable inboundEndpoints = null) { throw null; } public static Azure.Compute.Batch.BatchNodeError BatchNodeError(string code = null, string message = null, System.Collections.Generic.IEnumerable errorDetails = null) { throw null; } public static Azure.Compute.Batch.BatchNodeFile BatchNodeFile(string name = null, System.Uri uri = null, bool? 
isDirectory = default(bool?), Azure.Compute.Batch.FileProperties properties = null) { throw null; } public static Azure.Compute.Batch.BatchNodeInfo BatchNodeInfo(string affinityId = null, System.Uri nodeUri = null, string poolId = null, string nodeId = null, string taskRootDirectory = null, System.Uri taskRootDirectoryUri = null) { throw null; } - public static Azure.Compute.Batch.BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(System.Net.IPAddress remoteLoginIpAddress = null, int remoteLoginPort = 0) { throw null; } + public static Azure.Compute.Batch.BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(System.Net.IPAddress ipv6RemoteLoginIpAddress = null, int? ipv6RemoteLoginPort = default(int?), System.Net.IPAddress remoteLoginIpAddress = null, int remoteLoginPort = 0) { throw null; } public static Azure.Compute.Batch.BatchNodeUserCreateOptions BatchNodeUserCreateOptions(string name = null, bool? isAdmin = default(bool?), System.DateTimeOffset? expiryTime = default(System.DateTimeOffset?), string password = null, string sshPublicKey = null) { throw null; } public static Azure.Compute.Batch.BatchNodeVMExtension BatchNodeVMExtension(string provisioningState = null, Azure.Compute.Batch.VMExtension vmExtension = null, Azure.Compute.Batch.VMExtensionInstanceView instanceView = null) { throw null; } - public static Azure.Compute.Batch.BatchPool BatchPool(string id = null, string displayName = null, System.Uri uri = null, Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchPoolState? state = default(Azure.Compute.Batch.BatchPoolState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.AllocationState? allocationState = default(Azure.Compute.Batch.AllocationState?), System.DateTimeOffset? 
allocationStateTransitionTime = default(System.DateTimeOffset?), string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IEnumerable resizeErrors = null, System.Collections.Generic.IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = default(int?), int? currentLowPriorityNodes = default(int?), int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), Azure.Compute.Batch.AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable certificateReferences = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, Azure.Compute.Batch.BatchPoolStatistics poolStatistics = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchPoolIdentity identity = null, Azure.Compute.Batch.BatchNodeCommunicationMode? targetNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.BatchNodeCommunicationMode? 
currentNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } - public static Azure.Compute.Batch.BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, string displayName = null, string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IDictionary resourceTags = null, int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable certificateReferences = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchNodeCommunicationMode? targetNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } + public static Azure.Compute.Batch.BatchPool BatchPool(string id = null, string displayName = null, System.Uri uri = null, Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchPoolState? state = default(Azure.Compute.Batch.BatchPoolState?), System.DateTimeOffset? 
stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.AllocationState? allocationState = default(Azure.Compute.Batch.AllocationState?), System.DateTimeOffset? allocationStateTransitionTime = default(System.DateTimeOffset?), string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IEnumerable resizeErrors = null, int? currentDedicatedNodes = default(int?), int? currentLowPriorityNodes = default(int?), int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), Azure.Compute.Batch.AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, Azure.Compute.Batch.BatchPoolStatistics poolStatistics = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchPoolIdentity identity = null, Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } + public static Azure.Compute.Batch.BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, string displayName = null, string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? 
enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } public static Azure.Compute.Batch.BatchPoolIdentity BatchPoolIdentity(Azure.Compute.Batch.BatchPoolIdentityType type = default(Azure.Compute.Batch.BatchPoolIdentityType), System.Collections.Generic.IEnumerable userAssignedIdentities = null) { throw null; } public static Azure.Compute.Batch.BatchPoolNodeCounts BatchPoolNodeCounts(string poolId = null, Azure.Compute.Batch.BatchNodeCounts dedicated = null, Azure.Compute.Batch.BatchNodeCounts lowPriority = null) { throw null; } public static Azure.Compute.Batch.BatchPoolResourceStatistics BatchPoolResourceStatistics(System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), float avgCpuPercentage = 0f, float avgMemoryGiB = 0f, float peakMemoryGiB = 0f, float avgDiskGiB = 0f, float peakDiskGiB = 0f, long diskReadIops = (long)0, long diskWriteIops = (long)0, float diskReadGiB = 0f, float diskWriteGiB = 0f, float networkReadGiB = 0f, float networkWriteGiB = 0f) { throw null; } @@ -2976,6 +2834,7 @@ public DataDisk(int logicalUnitNumber, int diskSizeGb) { } public Azure.Compute.Batch.CachingType? 
Caching { get { throw null; } set { } } public int DiskSizeGb { get { throw null; } set { } } public int LogicalUnitNumber { get { throw null; } set { } } + public Azure.Compute.Batch.ManagedDisk ManagedDisk { get { throw null; } set { } } public Azure.Compute.Batch.StorageAccountType? StorageAccountType { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.DataDisk System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } @@ -3003,18 +2862,6 @@ public partial class DefaultCreateTaskResultHandler : Azure.Compute.Batch.TaskRe public DefaultCreateTaskResultHandler() { } public override Azure.Compute.Batch.CreateTaskResultStatus CreateTaskResultHandler(Azure.Compute.Batch.CreateTaskResult addTaskResult, System.Threading.CancellationToken cancellationToken) { throw null; } } - public partial class DeleteCertificateOperation : Azure.Operation - { - protected DeleteCertificateOperation() { } - public DeleteCertificateOperation(Azure.Compute.Batch.BatchClient client, string id) { } - public override bool HasCompleted { get { throw null; } } - public override bool HasValue { get { throw null; } } - public override string Id { get { throw null; } } - public override bool Value { get { throw null; } } - public override Azure.Response GetRawResponse() { throw null; } - public override Azure.Response UpdateStatus(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public override System.Threading.Tasks.ValueTask UpdateStatusAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - } public partial class DeleteJobOperation : Azure.Operation { protected DeleteJobOperation() { } @@ -3117,9 
+2964,23 @@ public DisableJobOperation(Azure.Compute.Batch.BatchClient client, string id) { public override Azure.Response UpdateStatus(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public override System.Threading.Tasks.ValueTask UpdateStatusAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } } + public partial class DiskCustomerManagedKey : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public DiskCustomerManagedKey() { } + public Azure.Compute.Batch.BatchPoolIdentityReference IdentityReference { get { throw null; } set { } } + public string KeyUrl { get { throw null; } set { } } + public bool? RotationToLatestKeyVersionEnabled { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskCustomerManagedKey System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskCustomerManagedKey System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } public partial class DiskEncryptionConfiguration : 
System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public DiskEncryptionConfiguration() { } + public Azure.Compute.Batch.DiskCustomerManagedKey CustomerManagedKey { get { throw null; } set { } } public System.Collections.Generic.IList Targets { get { throw null; } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.DiskEncryptionConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } @@ -3128,6 +2989,17 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + public partial class DiskEncryptionSetParameters : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public DiskEncryptionSetParameters() { } + public Azure.Core.ResourceIdentifier Id { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskEncryptionSetParameters System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskEncryptionSetParameters 
System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] public readonly partial struct DiskEncryptionTarget : System.IEquatable { @@ -3273,6 +3145,36 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + public partial class HostEndpointSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public HostEndpointSettings() { } + public string InVmAccessControlProfileReferenceId { get { throw null; } set { } } + public Azure.Compute.Batch.HostEndpointSettingsModeTypes? 
Mode { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.HostEndpointSettings System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.HostEndpointSettings System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct HostEndpointSettingsModeTypes : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public HostEndpointSettingsModeTypes(string value) { throw null; } + public static Azure.Compute.Batch.HostEndpointSettingsModeTypes Audit { get { throw null; } } + public static Azure.Compute.Batch.HostEndpointSettingsModeTypes Enforce { get { throw null; } } + public bool Equals(Azure.Compute.Batch.HostEndpointSettingsModeTypes other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int 
GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.HostEndpointSettingsModeTypes left, Azure.Compute.Batch.HostEndpointSettingsModeTypes right) { throw null; } + public static implicit operator Azure.Compute.Batch.HostEndpointSettingsModeTypes (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.HostEndpointSettingsModeTypes left, Azure.Compute.Batch.HostEndpointSettingsModeTypes right) { throw null; } + public override string ToString() { throw null; } + } [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] public readonly partial struct ImageVerificationType : System.IEquatable { @@ -3359,6 +3261,36 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public static bool operator !=(Azure.Compute.Batch.IpAddressProvisioningType left, Azure.Compute.Batch.IpAddressProvisioningType right) { throw null; } public override string ToString() { throw null; } } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct IPFamily : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public IPFamily(string value) { throw null; } + public static Azure.Compute.Batch.IPFamily IPv4 { get { throw null; } } + public static Azure.Compute.Batch.IPFamily IPv6 { get { throw null; } } + public bool Equals(Azure.Compute.Batch.IPFamily other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.IPFamily left, Azure.Compute.Batch.IPFamily right) { throw null; } + public static implicit 
operator Azure.Compute.Batch.IPFamily (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.IPFamily left, Azure.Compute.Batch.IPFamily right) { throw null; } + public override string ToString() { throw null; } + } + public partial class IPTag : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public IPTag() { } + public string IpTagType { get { throw null; } set { } } + public string Tag { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.IPTag System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.IPTag System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } public partial class LinuxUserConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public LinuxUserConfiguration() { } @@ -3393,6 +3325,7 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public partial class ManagedDisk : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public ManagedDisk() { } + public 
Azure.Compute.Batch.DiskEncryptionSetParameters DiskEncryptionSet { get { throw null; } set { } } public Azure.Compute.Batch.BatchVmDiskSecurityProfile SecurityProfile { get { throw null; } set { } } public Azure.Compute.Batch.StorageAccountType? StorageAccountType { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -3604,6 +3537,19 @@ public partial class ParallelOperationsException : System.AggregateException internal ParallelOperationsException() { } public override string ToString() { throw null; } } + public partial class ProxyAgentSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public ProxyAgentSettings() { } + public bool? Enabled { get { throw null; } set { } } + public Azure.Compute.Batch.HostEndpointSettings Imds { get { throw null; } set { } } + public Azure.Compute.Batch.HostEndpointSettings WireServer { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ProxyAgentSettings System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ProxyAgentSettings System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } public partial class RebootNodeOperation : Azure.Operation { protected RebootNodeOperation() { } @@ -3745,6 +3691,7 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer private readonly object _dummy; private readonly int _dummyPrimitive; public SecurityEncryptionTypes(string value) { throw null; } + public static Azure.Compute.Batch.SecurityEncryptionTypes DiskWithVMGuestState { get { throw null; } } public static Azure.Compute.Batch.SecurityEncryptionTypes NonPersistedTPM { get { throw null; } } public static Azure.Compute.Batch.SecurityEncryptionTypes VMGuestStateOnly { get { throw null; } } public bool Equals(Azure.Compute.Batch.SecurityEncryptionTypes other) { throw null; } @@ -3761,6 +3708,7 @@ public partial class SecurityProfile : System.ClientModel.Primitives.IJsonModel< { public SecurityProfile() { } public bool? EncryptionAtHost { get { throw null; } set { } } + public Azure.Compute.Batch.ProxyAgentSettings ProxyAgentSettings { get { throw null; } set { } } public Azure.Compute.Batch.SecurityTypes? 
SecurityType { get { throw null; } set { } } public Azure.Compute.Batch.BatchUefiSettings UefiSettings { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } diff --git a/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.netstandard2.0.cs b/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.netstandard2.0.cs index 3048ee262527..277c731e4053 100644 --- a/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.netstandard2.0.cs +++ b/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.netstandard2.0.cs @@ -228,130 +228,6 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } - public partial class BatchCertificate : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel - { - public BatchCertificate(string thumbprint, string thumbprintAlgorithm, System.BinaryData data) { } - public Azure.Compute.Batch.BatchCertificateFormat? CertificateFormat { get { throw null; } set { } } - public System.BinaryData Data { get { throw null; } set { } } - public Azure.Compute.Batch.BatchCertificateDeleteError DeleteCertificateError { get { throw null; } } - public string Password { get { throw null; } set { } } - public Azure.Compute.Batch.BatchCertificateState? PreviousState { get { throw null; } } - public System.DateTimeOffset? PreviousStateTransitionTime { get { throw null; } } - public string PublicData { get { throw null; } } - public Azure.Compute.Batch.BatchCertificateState? State { get { throw null; } } - public System.DateTimeOffset? 
StateTransitionTime { get { throw null; } } - public string Thumbprint { get { throw null; } set { } } - public string ThumbprintAlgorithm { get { throw null; } set { } } - public System.Uri Uri { get { throw null; } } - protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificate System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificate System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - } - public partial class BatchCertificateDeleteError : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel - { - internal BatchCertificateDeleteError() { } - public string Code { get { throw null; } } - public string Message { get { throw null; } } - public System.Collections.Generic.IReadOnlyList Values { get { throw null; } } - protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateDeleteError System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - 
void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateDeleteError System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateFormat : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchCertificateFormat(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateFormat Cer { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateFormat Pfx { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchCertificateFormat other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateFormat left, Azure.Compute.Batch.BatchCertificateFormat right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchCertificateFormat (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchCertificateFormat left, Azure.Compute.Batch.BatchCertificateFormat right) { throw null; } - public 
override string ToString() { throw null; } - } - public partial class BatchCertificateReference : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel - { - public BatchCertificateReference(string thumbprint, string thumbprintAlgorithm) { } - public Azure.Compute.Batch.BatchCertificateStoreLocation? StoreLocation { get { throw null; } set { } } - public string StoreName { get { throw null; } set { } } - public string Thumbprint { get { throw null; } set { } } - public string ThumbprintAlgorithm { get { throw null; } set { } } - public System.Collections.Generic.IList Visibility { get { throw null; } } - protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateReference System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } - Azure.Compute.Batch.BatchCertificateReference System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateState : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public 
BatchCertificateState(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateState Active { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateState DeleteFailed { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateState Deleting { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchCertificateState other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateState left, Azure.Compute.Batch.BatchCertificateState right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchCertificateState (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchCertificateState left, Azure.Compute.Batch.BatchCertificateState right) { throw null; } - public override string ToString() { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateStoreLocation : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchCertificateStoreLocation(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateStoreLocation CurrentUser { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateStoreLocation LocalMachine { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchCertificateStoreLocation other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateStoreLocation left, Azure.Compute.Batch.BatchCertificateStoreLocation right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchCertificateStoreLocation (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchCertificateStoreLocation left, Azure.Compute.Batch.BatchCertificateStoreLocation right) { throw null; } - public override string ToString() { throw null; } - } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchCertificateVisibility : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchCertificateVisibility(string value) { throw null; } - public static Azure.Compute.Batch.BatchCertificateVisibility RemoteUser { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateVisibility StartTask { get { throw null; } } - public static Azure.Compute.Batch.BatchCertificateVisibility Task { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchCertificateVisibility other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchCertificateVisibility left, Azure.Compute.Batch.BatchCertificateVisibility right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchCertificateVisibility (string value) { throw null; } - public static bool operator 
!=(Azure.Compute.Batch.BatchCertificateVisibility left, Azure.Compute.Batch.BatchCertificateVisibility right) { throw null; } - public override string ToString() { throw null; } - } public partial class BatchClient { protected BatchClient() { } @@ -360,12 +236,6 @@ public BatchClient(System.Uri endpoint, Azure.AzureNamedKeyCredential credential public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential) { } public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, Azure.Compute.Batch.BatchClientOptions options) { } public virtual Azure.Core.Pipeline.HttpPipeline Pipeline { get { throw null; } } - public virtual Azure.Response CancelCertificateDeletion(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual System.Threading.Tasks.Task CancelCertificateDeletionAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual Azure.Response CreateCertificate(Azure.Compute.Batch.BatchCertificate certificate, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response CreateCertificate(Azure.Core.RequestContent content, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual System.Threading.Tasks.Task CreateCertificateAsync(Azure.Compute.Batch.BatchCertificate certificate, System.TimeSpan? 
timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task CreateCertificateAsync(Azure.Core.RequestContent content, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } public virtual Azure.Response CreateJob(Azure.Compute.Batch.BatchJobCreateOptions job, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateJob(Azure.Core.RequestContent content, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } public virtual System.Threading.Tasks.Task CreateJobAsync(Azure.Compute.Batch.BatchJobCreateOptions job, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -394,8 +264,6 @@ public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, A public virtual System.Threading.Tasks.Task CreateTasksAsync(string jobId, System.Collections.Generic.IEnumerable tasksToAdd, Azure.Compute.Batch.CreateTasksOptions createTasksOptions = null, System.TimeSpan? 
timeOutInSeconds = default(System.TimeSpan?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Compute.Batch.DeallocateNodeOperation DeallocateNode(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeDeallocateOptions parameters = null, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task DeallocateNodeAsync(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeDeallocateOptions parameters = null, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Compute.Batch.DeleteCertificateOperation DeleteCertificate(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } - public virtual System.Threading.Tasks.Task DeleteCertificateAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } public virtual Azure.Compute.Batch.DeleteJobOperation DeleteJob(string jobId, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), bool? 
force = default(bool?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } public virtual System.Threading.Tasks.Task DeleteJobAsync(string jobId, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), bool? force = default(bool?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } public virtual Azure.Compute.Batch.DeleteJobScheduleOperation DeleteJobSchedule(string jobScheduleId, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), bool? force = default(bool?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } @@ -442,14 +310,6 @@ public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, A public virtual Azure.Pageable GetApplications(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.AsyncPageable GetApplicationsAsync(System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, int? maxresults, Azure.RequestContext context) { throw null; } public virtual Azure.AsyncPageable GetApplicationsAsync(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response GetCertificate(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? 
ocpDate, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual Azure.Response GetCertificate(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual System.Threading.Tasks.Task> GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Pageable GetCertificates(System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual Azure.Pageable GetCertificates(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.AsyncPageable GetCertificatesAsync(System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, int? 
maxresults, string filter, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } - public virtual Azure.AsyncPageable GetCertificatesAsync(System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response GetJob(string jobId, System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? ocpDate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } public virtual Azure.Response GetJob(string jobId, System.TimeSpan? timeOutInSeconds = default(System.TimeSpan?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task GetJobAsync(string jobId, System.TimeSpan? timeOutInSeconds, System.DateTimeOffset? 
ocpDate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } @@ -613,10 +473,10 @@ public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, A } public partial class BatchClientOptions : Azure.Core.ClientOptions { - public BatchClientOptions(Azure.Compute.Batch.BatchClientOptions.ServiceVersion version = Azure.Compute.Batch.BatchClientOptions.ServiceVersion.V2024_07_01_20_0) { } + public BatchClientOptions(Azure.Compute.Batch.BatchClientOptions.ServiceVersion version = Azure.Compute.Batch.BatchClientOptions.ServiceVersion.V2025_06_01) { } public enum ServiceVersion { - V2024_07_01_20_0 = 1, + V2025_06_01 = 1, } } public partial class BatchContainerConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel @@ -963,6 +823,24 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchJobDefaultOrder : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchJobDefaultOrder(string value) { throw null; } + public static Azure.Compute.Batch.BatchJobDefaultOrder CreationTime { get { throw null; } } + public static Azure.Compute.Batch.BatchJobDefaultOrder None { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchJobDefaultOrder other) { throw null; } + 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchJobDefaultOrder left, Azure.Compute.Batch.BatchJobDefaultOrder right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchJobDefaultOrder (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchJobDefaultOrder left, Azure.Compute.Batch.BatchJobDefaultOrder right) { throw null; } + public override string ToString() { throw null; } + } public partial class BatchJobDisableOptions : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public BatchJobDisableOptions(Azure.Compute.Batch.DisableBatchJobOption disableTasks) { } @@ -1411,11 +1289,11 @@ public partial class BatchNode : System.ClientModel.Primitives.IJsonModel CertificateReferences { get { throw null; } } public Azure.Compute.Batch.BatchNodeEndpointConfiguration EndpointConfiguration { get { throw null; } } public System.Collections.Generic.IReadOnlyList Errors { get { throw null; } } public string Id { get { throw null; } } public System.Net.IPAddress IpAddress { get { throw null; } } + public System.Net.IPAddress Ipv6Address { get { throw null; } } public bool? IsDedicated { get { throw null; } } public System.DateTimeOffset? 
LastBootTime { get { throw null; } } public Azure.Compute.Batch.BatchNodeAgentInfo NodeAgentInfo { get { throw null; } } @@ -1451,25 +1329,6 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public readonly partial struct BatchNodeCommunicationMode : System.IEquatable - { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public BatchNodeCommunicationMode(string value) { throw null; } - public static Azure.Compute.Batch.BatchNodeCommunicationMode Classic { get { throw null; } } - public static Azure.Compute.Batch.BatchNodeCommunicationMode Default { get { throw null; } } - public static Azure.Compute.Batch.BatchNodeCommunicationMode Simplified { get { throw null; } } - public bool Equals(Azure.Compute.Batch.BatchNodeCommunicationMode other) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override bool Equals(object obj) { throw null; } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public override int GetHashCode() { throw null; } - public static bool operator ==(Azure.Compute.Batch.BatchNodeCommunicationMode left, Azure.Compute.Batch.BatchNodeCommunicationMode right) { throw null; } - public static implicit operator Azure.Compute.Batch.BatchNodeCommunicationMode (string value) { throw null; } - public static bool operator !=(Azure.Compute.Batch.BatchNodeCommunicationMode left, Azure.Compute.Batch.BatchNodeCommunicationMode right) { throw null; } - public 
override string ToString() { throw null; } - } public partial class BatchNodeCounts : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { internal BatchNodeCounts() { } @@ -1755,6 +1614,8 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public partial class BatchNodeRemoteLoginSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { internal BatchNodeRemoteLoginSettings() { } + public System.Net.IPAddress Ipv6RemoteLoginIpAddress { get { throw null; } } + public int? Ipv6RemoteLoginPort { get { throw null; } } public System.Net.IPAddress RemoteLoginIpAddress { get { throw null; } } public int RemoteLoginPort { get { throw null; } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -1874,11 +1735,9 @@ internal BatchPool() { } public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } } public string AutoScaleFormula { get { throw null; } } public Azure.Compute.Batch.AutoScaleRun AutoScaleRun { get { throw null; } } - public System.Collections.Generic.IReadOnlyList CertificateReferences { get { throw null; } } public System.DateTimeOffset? CreationTime { get { throw null; } } public int? CurrentDedicatedNodes { get { throw null; } } public int? CurrentLowPriorityNodes { get { throw null; } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? CurrentNodeCommunicationMode { get { throw null; } } public string DisplayName { get { throw null; } } public bool? EnableAutoScale { get { throw null; } } public bool? EnableInterNodeCommunication { get { throw null; } } @@ -1892,13 +1751,11 @@ internal BatchPool() { } public Azure.Compute.Batch.BatchPoolStatistics PoolStatistics { get { throw null; } } public System.Collections.Generic.IReadOnlyList ResizeErrors { get { throw null; } } public System.TimeSpan? 
ResizeTimeout { get { throw null; } } - public System.Collections.Generic.IReadOnlyDictionary ResourceTags { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } } public Azure.Compute.Batch.BatchPoolState? State { get { throw null; } } public System.DateTimeOffset? StateTransitionTime { get { throw null; } } public int? TargetDedicatedNodes { get { throw null; } } public int? TargetLowPriorityNodes { get { throw null; } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } } public int? TaskSlotsPerNode { get { throw null; } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } } @@ -1919,7 +1776,6 @@ public BatchPoolCreateOptions(string id, string vmSize) { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } set { } } public string AutoScaleFormula { get { throw null; } set { } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public string DisplayName { get { throw null; } set { } } public bool? EnableAutoScale { get { throw null; } set { } } public bool? EnableInterNodeCommunication { get { throw null; } set { } } @@ -1928,11 +1784,9 @@ public BatchPoolCreateOptions(string id, string vmSize) { } public System.Collections.Generic.IList MountConfiguration { get { throw null; } } public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } } public System.TimeSpan? ResizeTimeout { get { throw null; } set { } } - public System.Collections.Generic.IDictionary ResourceTags { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } public int? TargetDedicatedNodes { get { throw null; } set { } } public int? 
TargetLowPriorityNodes { get { throw null; } set { } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } } public int? TaskSlotsPerNode { get { throw null; } set { } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } } @@ -1992,6 +1846,17 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + public partial class BatchPoolIdentityReference : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchPoolIdentityReference() { } + public Azure.Core.ResourceIdentifier ResourceId { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchPoolIdentityReference System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchPoolIdentityReference System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { 
throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] public readonly partial struct BatchPoolIdentityType : System.IEquatable { @@ -2055,12 +1920,10 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer } public partial class BatchPoolReplaceOptions : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { - public BatchPoolReplaceOptions(System.Collections.Generic.IEnumerable certificateReferences, System.Collections.Generic.IEnumerable applicationPackageReferences, System.Collections.Generic.IEnumerable metadata) { } + public BatchPoolReplaceOptions(System.Collections.Generic.IEnumerable applicationPackageReferences, System.Collections.Generic.IEnumerable metadata) { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public System.Collections.Generic.IList Metadata { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? 
TargetNodeCommunicationMode { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.BatchPoolReplaceOptions System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -2111,7 +1974,6 @@ public BatchPoolSpecification(string vmSize) { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } set { } } public string AutoScaleFormula { get { throw null; } set { } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public string DisplayName { get { throw null; } set { } } public bool? EnableAutoScale { get { throw null; } set { } } public bool? EnableInterNodeCommunication { get { throw null; } set { } } @@ -2119,11 +1981,9 @@ public BatchPoolSpecification(string vmSize) { } public System.Collections.Generic.IList MountConfiguration { get { throw null; } } public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } } public System.TimeSpan? ResizeTimeout { get { throw null; } set { } } - public string ResourceTags { get { throw null; } set { } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } public int? TargetDedicatedNodes { get { throw null; } set { } } public int? TargetLowPriorityNodes { get { throw null; } set { } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? 
TargetNodeCommunicationMode { get { throw null; } set { } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } } public int? TaskSlotsPerNode { get { throw null; } set { } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } } @@ -2174,15 +2034,12 @@ public partial class BatchPoolUpdateOptions : System.ClientModel.Primitives.IJso { public BatchPoolUpdateOptions() { } public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } - public System.Collections.Generic.IList CertificateReferences { get { throw null; } } public string DisplayName { get { throw null; } set { } } public bool? EnableInterNodeCommunication { get { throw null; } set { } } public System.Collections.Generic.IList Metadata { get { throw null; } } public System.Collections.Generic.IList MountConfiguration { get { throw null; } } public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } } - public System.Collections.Generic.IDictionary ResourceTags { get { throw null; } } public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } } - public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } } public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } } public int? TaskSlotsPerNode { get { throw null; } set { } } public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } } @@ -2229,6 +2086,8 @@ public partial class BatchPublicIpAddressConfiguration : System.ClientModel.Prim public BatchPublicIpAddressConfiguration() { } public System.Collections.Generic.IList IpAddressIds { get { throw null; } } public Azure.Compute.Batch.IpAddressProvisioningType? 
IpAddressProvisioningType { get { throw null; } set { } } + public System.Collections.Generic.IList IpFamilies { get { throw null; } } + public System.Collections.Generic.IList IpTags { get { throw null; } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.BatchPublicIpAddressConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -2637,6 +2496,7 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public partial class BatchTaskSchedulingPolicy : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public BatchTaskSchedulingPolicy(Azure.Compute.Batch.BatchNodeFillType nodeFillType) { } + public Azure.Compute.Batch.BatchJobDefaultOrder? 
JobDefaultOrder { get { throw null; } set { } } public Azure.Compute.Batch.BatchNodeFillType NodeFillType { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.BatchTaskSchedulingPolicy System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } @@ -2794,8 +2654,6 @@ public static partial class ComputeBatchModelFactory public static Azure.Compute.Batch.AutoScaleRun AutoScaleRun(System.DateTimeOffset timestamp = default(System.DateTimeOffset), string results = null, Azure.Compute.Batch.AutoScaleRunError error = null) { throw null; } public static Azure.Compute.Batch.AutoScaleRunError AutoScaleRunError(string code = null, string message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchApplication BatchApplication(string id = null, string displayName = null, System.Collections.Generic.IEnumerable versions = null) { throw null; } - public static Azure.Compute.Batch.BatchCertificate BatchCertificate(string thumbprint = null, string thumbprintAlgorithm = null, System.Uri uri = null, Azure.Compute.Batch.BatchCertificateState? state = default(Azure.Compute.Batch.BatchCertificateState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchCertificateState? previousState = default(Azure.Compute.Batch.BatchCertificateState?), System.DateTimeOffset? previousStateTransitionTime = default(System.DateTimeOffset?), string publicData = null, Azure.Compute.Batch.BatchCertificateDeleteError deleteCertificateError = null, System.BinaryData data = null, Azure.Compute.Batch.BatchCertificateFormat? 
certificateFormat = default(Azure.Compute.Batch.BatchCertificateFormat?), string password = null) { throw null; } - public static Azure.Compute.Batch.BatchCertificateDeleteError BatchCertificateDeleteError(string code = null, string message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchCreateTaskCollectionResult BatchCreateTaskCollectionResult(System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchError BatchError(string code = null, Azure.Compute.Batch.BatchErrorMessage message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } public static Azure.Compute.Batch.BatchErrorDetail BatchErrorDetail(string key = null, string value = null) { throw null; } @@ -2812,18 +2670,18 @@ public static partial class ComputeBatchModelFactory public static Azure.Compute.Batch.BatchJobScheduleStatistics BatchJobScheduleStatistics(System.Uri uri = null, System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), System.TimeSpan userCpuTime = default(System.TimeSpan), System.TimeSpan kernelCpuTime = default(System.TimeSpan), System.TimeSpan wallClockTime = default(System.TimeSpan), long readIops = (long)0, long writeIops = (long)0, float readIoGiB = 0f, float writeIoGiB = 0f, long succeededTasksCount = (long)0, long failedTasksCount = (long)0, long taskRetriesCount = (long)0, System.TimeSpan waitTime = default(System.TimeSpan)) { throw null; } public static Azure.Compute.Batch.BatchJobSchedulingError BatchJobSchedulingError(Azure.Compute.Batch.BatchErrorSourceCategory category = default(Azure.Compute.Batch.BatchErrorSourceCategory), string code = null, string message = null, System.Collections.Generic.IEnumerable details = null) { throw null; } public static Azure.Compute.Batch.BatchJobStatistics BatchJobStatistics(System.Uri uri = null, System.DateTimeOffset 
startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), System.TimeSpan userCpuTime = default(System.TimeSpan), System.TimeSpan kernelCpuTime = default(System.TimeSpan), System.TimeSpan wallClockTime = default(System.TimeSpan), long readIops = (long)0, long writeIops = (long)0, float readIoGiB = 0f, float writeIoGiB = 0f, long succeededTasksCount = (long)0, long failedTasksCount = (long)0, long taskRetriesCount = (long)0, System.TimeSpan waitTime = default(System.TimeSpan)) { throw null; } - public static Azure.Compute.Batch.BatchNode BatchNode(string id = null, System.Uri uri = null, Azure.Compute.Batch.BatchNodeState? state = default(Azure.Compute.Batch.BatchNodeState?), Azure.Compute.Batch.SchedulingState? schedulingState = default(Azure.Compute.Batch.SchedulingState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), System.DateTimeOffset? lastBootTime = default(System.DateTimeOffset?), System.DateTimeOffset? allocationTime = default(System.DateTimeOffset?), System.Net.IPAddress ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = default(int?), int? runningTasksCount = default(int?), int? runningTaskSlotsCount = default(int?), int? totalTasksSucceeded = default(int?), System.Collections.Generic.IEnumerable recentTasks = null, Azure.Compute.Batch.BatchStartTask startTask = null, Azure.Compute.Batch.BatchStartTaskInfo startTaskInfo = null, System.Collections.Generic.IEnumerable certificateReferences = null, System.Collections.Generic.IEnumerable errors = null, bool? 
isDedicated = default(bool?), Azure.Compute.Batch.BatchNodeEndpointConfiguration endpointConfiguration = null, Azure.Compute.Batch.BatchNodeAgentInfo nodeAgentInfo = null, Azure.Compute.Batch.VirtualMachineInfo virtualMachineInfo = null) { throw null; } + public static Azure.Compute.Batch.BatchNode BatchNode(string id = null, System.Uri uri = null, Azure.Compute.Batch.BatchNodeState? state = default(Azure.Compute.Batch.BatchNodeState?), Azure.Compute.Batch.SchedulingState? schedulingState = default(Azure.Compute.Batch.SchedulingState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), System.DateTimeOffset? lastBootTime = default(System.DateTimeOffset?), System.DateTimeOffset? allocationTime = default(System.DateTimeOffset?), System.Net.IPAddress ipAddress = null, System.Net.IPAddress ipv6Address = null, string affinityId = null, string vmSize = null, int? totalTasksRun = default(int?), int? runningTasksCount = default(int?), int? runningTaskSlotsCount = default(int?), int? totalTasksSucceeded = default(int?), System.Collections.Generic.IEnumerable recentTasks = null, Azure.Compute.Batch.BatchStartTask startTask = null, Azure.Compute.Batch.BatchStartTaskInfo startTaskInfo = null, System.Collections.Generic.IEnumerable errors = null, bool? 
isDedicated = default(bool?), Azure.Compute.Batch.BatchNodeEndpointConfiguration endpointConfiguration = null, Azure.Compute.Batch.BatchNodeAgentInfo nodeAgentInfo = null, Azure.Compute.Batch.VirtualMachineInfo virtualMachineInfo = null) { throw null; } public static Azure.Compute.Batch.BatchNodeAgentInfo BatchNodeAgentInfo(string version = null, System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset)) { throw null; } public static Azure.Compute.Batch.BatchNodeCounts BatchNodeCounts(int creating = 0, int idle = 0, int offline = 0, int preempted = 0, int rebooting = 0, int reimaging = 0, int running = 0, int starting = 0, int startTaskFailed = 0, int leavingPool = 0, int unknown = 0, int unusable = 0, int waitingForStartTask = 0, int deallocated = 0, int deallocating = 0, int total = 0, int upgradingOs = 0) { throw null; } public static Azure.Compute.Batch.BatchNodeEndpointConfiguration BatchNodeEndpointConfiguration(System.Collections.Generic.IEnumerable inboundEndpoints = null) { throw null; } public static Azure.Compute.Batch.BatchNodeError BatchNodeError(string code = null, string message = null, System.Collections.Generic.IEnumerable errorDetails = null) { throw null; } public static Azure.Compute.Batch.BatchNodeFile BatchNodeFile(string name = null, System.Uri uri = null, bool? 
isDirectory = default(bool?), Azure.Compute.Batch.FileProperties properties = null) { throw null; } public static Azure.Compute.Batch.BatchNodeInfo BatchNodeInfo(string affinityId = null, System.Uri nodeUri = null, string poolId = null, string nodeId = null, string taskRootDirectory = null, System.Uri taskRootDirectoryUri = null) { throw null; } - public static Azure.Compute.Batch.BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(System.Net.IPAddress remoteLoginIpAddress = null, int remoteLoginPort = 0) { throw null; } + public static Azure.Compute.Batch.BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(System.Net.IPAddress ipv6RemoteLoginIpAddress = null, int? ipv6RemoteLoginPort = default(int?), System.Net.IPAddress remoteLoginIpAddress = null, int remoteLoginPort = 0) { throw null; } public static Azure.Compute.Batch.BatchNodeUserCreateOptions BatchNodeUserCreateOptions(string name = null, bool? isAdmin = default(bool?), System.DateTimeOffset? expiryTime = default(System.DateTimeOffset?), string password = null, string sshPublicKey = null) { throw null; } public static Azure.Compute.Batch.BatchNodeVMExtension BatchNodeVMExtension(string provisioningState = null, Azure.Compute.Batch.VMExtension vmExtension = null, Azure.Compute.Batch.VMExtensionInstanceView instanceView = null) { throw null; } - public static Azure.Compute.Batch.BatchPool BatchPool(string id = null, string displayName = null, System.Uri uri = null, Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchPoolState? state = default(Azure.Compute.Batch.BatchPoolState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.AllocationState? allocationState = default(Azure.Compute.Batch.AllocationState?), System.DateTimeOffset? 
allocationStateTransitionTime = default(System.DateTimeOffset?), string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IEnumerable resizeErrors = null, System.Collections.Generic.IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = default(int?), int? currentLowPriorityNodes = default(int?), int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), Azure.Compute.Batch.AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable certificateReferences = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, Azure.Compute.Batch.BatchPoolStatistics poolStatistics = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchPoolIdentity identity = null, Azure.Compute.Batch.BatchNodeCommunicationMode? targetNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.BatchNodeCommunicationMode? 
currentNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } - public static Azure.Compute.Batch.BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, string displayName = null, string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IDictionary resourceTags = null, int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable certificateReferences = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchNodeCommunicationMode? targetNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } + public static Azure.Compute.Batch.BatchPool BatchPool(string id = null, string displayName = null, System.Uri uri = null, Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchPoolState? state = default(Azure.Compute.Batch.BatchPoolState?), System.DateTimeOffset? 
stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.AllocationState? allocationState = default(Azure.Compute.Batch.AllocationState?), System.DateTimeOffset? allocationStateTransitionTime = default(System.DateTimeOffset?), string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IEnumerable resizeErrors = null, int? currentDedicatedNodes = default(int?), int? currentLowPriorityNodes = default(int?), int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), Azure.Compute.Batch.AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, Azure.Compute.Batch.BatchPoolStatistics poolStatistics = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchPoolIdentity identity = null, Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } + public static Azure.Compute.Batch.BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, string displayName = null, string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? 
enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } public static Azure.Compute.Batch.BatchPoolIdentity BatchPoolIdentity(Azure.Compute.Batch.BatchPoolIdentityType type = default(Azure.Compute.Batch.BatchPoolIdentityType), System.Collections.Generic.IEnumerable userAssignedIdentities = null) { throw null; } public static Azure.Compute.Batch.BatchPoolNodeCounts BatchPoolNodeCounts(string poolId = null, Azure.Compute.Batch.BatchNodeCounts dedicated = null, Azure.Compute.Batch.BatchNodeCounts lowPriority = null) { throw null; } public static Azure.Compute.Batch.BatchPoolResourceStatistics BatchPoolResourceStatistics(System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), float avgCpuPercentage = 0f, float avgMemoryGiB = 0f, float peakMemoryGiB = 0f, float avgDiskGiB = 0f, float peakDiskGiB = 0f, long diskReadIops = (long)0, long diskWriteIops = (long)0, float diskReadGiB = 0f, float diskWriteGiB = 0f, float networkReadGiB = 0f, float networkWriteGiB = 0f) { throw null; } @@ -2976,6 +2834,7 @@ public DataDisk(int logicalUnitNumber, int diskSizeGb) { } public Azure.Compute.Batch.CachingType? 
Caching { get { throw null; } set { } } public int DiskSizeGb { get { throw null; } set { } } public int LogicalUnitNumber { get { throw null; } set { } } + public Azure.Compute.Batch.ManagedDisk ManagedDisk { get { throw null; } set { } } public Azure.Compute.Batch.StorageAccountType? StorageAccountType { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.DataDisk System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } @@ -3003,18 +2862,6 @@ public partial class DefaultCreateTaskResultHandler : Azure.Compute.Batch.TaskRe public DefaultCreateTaskResultHandler() { } public override Azure.Compute.Batch.CreateTaskResultStatus CreateTaskResultHandler(Azure.Compute.Batch.CreateTaskResult addTaskResult, System.Threading.CancellationToken cancellationToken) { throw null; } } - public partial class DeleteCertificateOperation : Azure.Operation - { - protected DeleteCertificateOperation() { } - public DeleteCertificateOperation(Azure.Compute.Batch.BatchClient client, string id) { } - public override bool HasCompleted { get { throw null; } } - public override bool HasValue { get { throw null; } } - public override string Id { get { throw null; } } - public override bool Value { get { throw null; } } - public override Azure.Response GetRawResponse() { throw null; } - public override Azure.Response UpdateStatus(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public override System.Threading.Tasks.ValueTask UpdateStatusAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - } public partial class DeleteJobOperation : Azure.Operation { protected DeleteJobOperation() { } @@ -3117,9 
+2964,23 @@ public DisableJobOperation(Azure.Compute.Batch.BatchClient client, string id) { public override Azure.Response UpdateStatus(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public override System.Threading.Tasks.ValueTask UpdateStatusAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } } + public partial class DiskCustomerManagedKey : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public DiskCustomerManagedKey() { } + public Azure.Compute.Batch.BatchPoolIdentityReference IdentityReference { get { throw null; } set { } } + public string KeyUrl { get { throw null; } set { } } + public bool? RotationToLatestKeyVersionEnabled { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskCustomerManagedKey System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskCustomerManagedKey System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } public partial class DiskEncryptionConfiguration : 
System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public DiskEncryptionConfiguration() { } + public Azure.Compute.Batch.DiskCustomerManagedKey CustomerManagedKey { get { throw null; } set { } } public System.Collections.Generic.IList Targets { get { throw null; } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } Azure.Compute.Batch.DiskEncryptionConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } @@ -3128,6 +2989,17 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + public partial class DiskEncryptionSetParameters : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public DiskEncryptionSetParameters() { } + public Azure.Core.ResourceIdentifier Id { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskEncryptionSetParameters System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.DiskEncryptionSetParameters 
System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] public readonly partial struct DiskEncryptionTarget : System.IEquatable { @@ -3273,6 +3145,36 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } } + public partial class HostEndpointSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public HostEndpointSettings() { } + public string InVmAccessControlProfileReferenceId { get { throw null; } set { } } + public Azure.Compute.Batch.HostEndpointSettingsModeTypes? 
Mode { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.HostEndpointSettings System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.HostEndpointSettings System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct HostEndpointSettingsModeTypes : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public HostEndpointSettingsModeTypes(string value) { throw null; } + public static Azure.Compute.Batch.HostEndpointSettingsModeTypes Audit { get { throw null; } } + public static Azure.Compute.Batch.HostEndpointSettingsModeTypes Enforce { get { throw null; } } + public bool Equals(Azure.Compute.Batch.HostEndpointSettingsModeTypes other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int 
GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.HostEndpointSettingsModeTypes left, Azure.Compute.Batch.HostEndpointSettingsModeTypes right) { throw null; } + public static implicit operator Azure.Compute.Batch.HostEndpointSettingsModeTypes (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.HostEndpointSettingsModeTypes left, Azure.Compute.Batch.HostEndpointSettingsModeTypes right) { throw null; } + public override string ToString() { throw null; } + } [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] public readonly partial struct ImageVerificationType : System.IEquatable { @@ -3359,6 +3261,36 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public static bool operator !=(Azure.Compute.Batch.IpAddressProvisioningType left, Azure.Compute.Batch.IpAddressProvisioningType right) { throw null; } public override string ToString() { throw null; } } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct IPFamily : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public IPFamily(string value) { throw null; } + public static Azure.Compute.Batch.IPFamily IPv4 { get { throw null; } } + public static Azure.Compute.Batch.IPFamily IPv6 { get { throw null; } } + public bool Equals(Azure.Compute.Batch.IPFamily other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.IPFamily left, Azure.Compute.Batch.IPFamily right) { throw null; } + public static implicit 
operator Azure.Compute.Batch.IPFamily (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.IPFamily left, Azure.Compute.Batch.IPFamily right) { throw null; } + public override string ToString() { throw null; } + } + public partial class IPTag : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public IPTag() { } + public string IpTagType { get { throw null; } set { } } + public string Tag { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.IPTag System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.IPTag System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } public partial class LinuxUserConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public LinuxUserConfiguration() { } @@ -3393,6 +3325,7 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer public partial class ManagedDisk : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel { public ManagedDisk() { } + public 
Azure.Compute.Batch.DiskEncryptionSetParameters DiskEncryptionSet { get { throw null; } set { } } public Azure.Compute.Batch.BatchVmDiskSecurityProfile SecurityProfile { get { throw null; } set { } } public Azure.Compute.Batch.StorageAccountType? StorageAccountType { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } @@ -3604,6 +3537,19 @@ public partial class ParallelOperationsException : System.AggregateException internal ParallelOperationsException() { } public override string ToString() { throw null; } } + public partial class ProxyAgentSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public ProxyAgentSettings() { } + public bool? Enabled { get { throw null; } set { } } + public Azure.Compute.Batch.HostEndpointSettings Imds { get { throw null; } set { } } + public Azure.Compute.Batch.HostEndpointSettings WireServer { get { throw null; } set { } } + protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ProxyAgentSettings System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ProxyAgentSettings System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } public partial class RebootNodeOperation : Azure.Operation { protected RebootNodeOperation() { } @@ -3745,6 +3691,7 @@ protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer private readonly object _dummy; private readonly int _dummyPrimitive; public SecurityEncryptionTypes(string value) { throw null; } + public static Azure.Compute.Batch.SecurityEncryptionTypes DiskWithVMGuestState { get { throw null; } } public static Azure.Compute.Batch.SecurityEncryptionTypes NonPersistedTPM { get { throw null; } } public static Azure.Compute.Batch.SecurityEncryptionTypes VMGuestStateOnly { get { throw null; } } public bool Equals(Azure.Compute.Batch.SecurityEncryptionTypes other) { throw null; } @@ -3761,6 +3708,7 @@ public partial class SecurityProfile : System.ClientModel.Primitives.IJsonModel< { public SecurityProfile() { } public bool? EncryptionAtHost { get { throw null; } set { } } + public Azure.Compute.Batch.ProxyAgentSettings ProxyAgentSettings { get { throw null; } set { } } public Azure.Compute.Batch.SecurityTypes? 
SecurityType { get { throw null; } set { } } public Azure.Compute.Batch.BatchUefiSettings UefiSettings { get { throw null; } set { } } protected virtual void JsonModelWriteCore(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } diff --git a/sdk/batch/Azure.Compute.Batch/assets.json b/sdk/batch/Azure.Compute.Batch/assets.json index 672c7493dea3..3086a778363b 100644 --- a/sdk/batch/Azure.Compute.Batch/assets.json +++ b/sdk/batch/Azure.Compute.Batch/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/batch/Azure.Compute.Batch", - "Tag": "net/batch/Azure.Compute.Batch_a2908b28d7" + "Tag": "net/batch/Azure.Compute.Batch_03f255448a" } diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/BatchClientCustom.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/BatchClientCustom.cs index 434c824606d2..cf04e2441afa 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Custom/BatchClientCustom.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/BatchClientCustom.cs @@ -800,86 +800,6 @@ public virtual DeleteJobOperation DeleteJob(string jobId, TimeSpan? timeOutInSec } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Deletes a Certificate from the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to be deleted. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. 
Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The DeleteCertificateOperation object to allow for polling of operation status. - public virtual async Task DeleteCertificateAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) - { - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificate"); - scope.Start(); - try - { - Response response = await DeleteCertificateInternalAsync(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); - return new DeleteCertificateOperation(this, thumbprintAlgorithm, thumbprint, response); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Deletes a Certificate from the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to be deleted. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. 
- /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The DeleteCertificateOperation object to allow for polling of operation status. - public virtual DeleteCertificateOperation DeleteCertificate(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) - { - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificate"); - scope.Start(); - try - { - Response response = DeleteCertificateInternal(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); - return new DeleteCertificateOperation(this, thumbprintAlgorithm, thumbprint, response); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// /// [Protocol Method] Deletes a Job Schedule from the specified Account. diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/LongRunningOperations/DeleteCertificateOperation.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/LongRunningOperations/DeleteCertificateOperation.cs deleted file mode 100644 index 22eca314055f..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Custom/LongRunningOperations/DeleteCertificateOperation.cs +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core; -using Azure.Core.Pipeline; - -namespace Azure.Compute.Batch -{ - /// - /// Upon success a Certificate will be deleted - /// - public class DeleteCertificateOperation : Operation - { - /// - /// The client used to check for completion. 
- /// - private readonly BatchClient _client; - - /// - /// Whether the operation has completed. - /// - private bool _hasCompleted; - - /// - /// Gets the success of the operation. - /// - private bool? _value; - private Response _rawResponse; - private string _thumbprintAlgorithm; - private string _thumbprint; - - /// - /// Initializes a new instance - /// - /// - /// The client used to check for completion. - /// - /// thumbprint Algortihm of certificate - /// thumbprint of certificate - /// - /// Either the response from initiating the operation or getting the - /// status if we're creating an operation from an existing ID. - /// - internal DeleteCertificateOperation( - BatchClient client, - string thumbprintAlgorithm, - string thumbprint, - Response initialResponse) - { - _thumbprint = thumbprint; - _thumbprintAlgorithm = thumbprintAlgorithm; - Id = _thumbprint + ";" + _thumbprintAlgorithm; - _value = false; - _rawResponse = initialResponse; - _client = client; - } - - /// - /// Initializes a new instance - /// - /// - /// The client used to check for completion. - /// - /// The ID of this operation. - public DeleteCertificateOperation( - BatchClient client, - string id) - { - if (String.IsNullOrEmpty(id)) - throw new ArgumentNullException("id is not formated correctly"); - string[] idSplit = id.Split(';'); - _thumbprint = idSplit[0]; - _thumbprintAlgorithm = idSplit[1]; - Id = id; - _value = false; - _rawResponse = null; - _client = client; - } - - /// - /// Initializes a new instance for - /// mocking. - /// - protected DeleteCertificateOperation() - { - } - - /// - /// Get the sucess state of the deletion operation - /// - public override bool Value => OperationHelpers.GetValue(ref _value); - - /// - /// Gets a value indicating whether the operation completed and - /// successfully produced a value. The - /// property is the success of the operation. 
- /// - public override bool HasValue => _value.HasValue; - - /// - public override string Id { get; } - - /// - /// Gets a value indicating whether the operation has completed. - /// - public override bool HasCompleted => _hasCompleted; - - /// - public override Response GetRawResponse() => _rawResponse; - - /// - /// Check for the latest status of the delete operation. - /// - /// - /// Optional to propagate - /// notifications that the operation should be cancelled. - /// - /// The with the status update. - public override Response UpdateStatus(CancellationToken cancellationToken = default) => - UpdateStatusAsync(false, cancellationToken).EnsureCompleted(); - - /// - /// Check for the latest status of the delete operation. - /// - /// - /// Optional to propagate - /// notifications that the operation should be cancelled. - /// - /// The with the status update. - public override async ValueTask UpdateStatusAsync(CancellationToken cancellationToken = default) => - await UpdateStatusAsync(true, cancellationToken).ConfigureAwait(false); - - /// - /// Check for the latest status of the copy operation. - /// - /// - /// Optional to propagate - /// notifications that the operation should be cancelled. - /// - /// - /// The with the status update. - private async Task UpdateStatusAsync(bool async, CancellationToken cancellationToken) - { - // Short-circuit when already completed (which improves mocking - // scenarios that won't have a client). - if (HasCompleted) - { - return GetRawResponse(); - } - - // Get the latest status - Response deleteResponse = null; - try - { - deleteResponse = async - ? 
await _client.GetCertificateAsync(_thumbprintAlgorithm,_thumbprint, cancellationToken: cancellationToken).ConfigureAwait(false) - : _client.GetCertificate(_thumbprintAlgorithm, _thumbprint, cancellationToken: cancellationToken); - } - catch (Azure.RequestFailedException e) - { - if (e.Status == 404) - { - _value = true; - _hasCompleted = true; - _rawResponse = e.GetRawResponse(); - } - else - { - throw; // throw if not 404 - } - } - - if (deleteResponse != null) - { - _rawResponse = deleteResponse.GetRawResponse(); - } - return _rawResponse; - } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs index ecd162d250ab..e642c851a28d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs @@ -51,7 +51,7 @@ public AutoUserSpecification() } /// Initializes a new instance of . - /// The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. + /// The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks. /// The elevation level of the auto user. The default value is nonAdmin. /// Keeps track of any properties unknown to the library. internal AutoUserSpecification(AutoUserScope? scope, ElevationLevel? elevationLevel, IDictionary serializedAdditionalRawData) @@ -61,7 +61,7 @@ internal AutoUserSpecification(AutoUserScope? 
scope, ElevationLevel? elevationLe _serializedAdditionalRawData = serializedAdditionalRawData; } - /// The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. + /// The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks. public AutoUserScope? Scope { get; set; } /// The elevation level of the auto user. The default value is nonAdmin. public ElevationLevel? ElevationLevel { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs deleted file mode 100644 index 7ebcdc4fd7b8..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; -using Azure.Core; - -namespace Azure.Compute.Batch -{ - public partial class BatchCertificate : IUtf8JsonSerializable, IJsonModel - { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - writer.WriteStartObject(); - JsonModelWriteCore(writer, options); - writer.WriteEndObject(); - } - - /// The JSON writer. 
- /// The client options for reading and writing models. - protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchCertificate)} does not support writing '{format}' format."); - } - - writer.WritePropertyName("thumbprint"u8); - writer.WriteStringValue(Thumbprint); - writer.WritePropertyName("thumbprintAlgorithm"u8); - writer.WriteStringValue(ThumbprintAlgorithm); - if (options.Format != "W" && Optional.IsDefined(Uri)) - { - writer.WritePropertyName("url"u8); - writer.WriteStringValue(Uri.AbsoluteUri); - } - if (options.Format != "W" && Optional.IsDefined(State)) - { - writer.WritePropertyName("state"u8); - writer.WriteStringValue(State.Value.ToString()); - } - if (options.Format != "W" && Optional.IsDefined(StateTransitionTime)) - { - writer.WritePropertyName("stateTransitionTime"u8); - writer.WriteStringValue(StateTransitionTime.Value, "O"); - } - if (options.Format != "W" && Optional.IsDefined(PreviousState)) - { - writer.WritePropertyName("previousState"u8); - writer.WriteStringValue(PreviousState.Value.ToString()); - } - if (options.Format != "W" && Optional.IsDefined(PreviousStateTransitionTime)) - { - writer.WritePropertyName("previousStateTransitionTime"u8); - writer.WriteStringValue(PreviousStateTransitionTime.Value, "O"); - } - if (options.Format != "W" && Optional.IsDefined(PublicData)) - { - writer.WritePropertyName("publicData"u8); - writer.WriteStringValue(PublicData); - } - if (options.Format != "W" && Optional.IsDefined(DeleteCertificateError)) - { - writer.WritePropertyName("deleteCertificateError"u8); - writer.WriteObjectValue(DeleteCertificateError, options); - } - writer.WritePropertyName("data"u8); - writer.WriteBase64StringValue(Data.ToArray(), "D"); - if (Optional.IsDefined(CertificateFormat)) - { - 
writer.WritePropertyName("certificateFormat"u8); - writer.WriteStringValue(CertificateFormat.Value.ToString()); - } - if (Optional.IsDefined(Password)) - { - writer.WritePropertyName("password"u8); - writer.WriteStringValue(Password); - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - } - - BatchCertificate IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchCertificate)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchCertificate(document.RootElement, options); - } - - internal static BatchCertificate DeserializeBatchCertificate(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string thumbprint = default; - string thumbprintAlgorithm = default; - Uri url = default; - BatchCertificateState? state = default; - DateTimeOffset? stateTransitionTime = default; - BatchCertificateState? previousState = default; - DateTimeOffset? previousStateTransitionTime = default; - string publicData = default; - BatchCertificateDeleteError deleteCertificateError = default; - BinaryData data = default; - BatchCertificateFormat? 
certificateFormat = default; - string password = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("thumbprint"u8)) - { - thumbprint = property.Value.GetString(); - continue; - } - if (property.NameEquals("thumbprintAlgorithm"u8)) - { - thumbprintAlgorithm = property.Value.GetString(); - continue; - } - if (property.NameEquals("url"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - url = new Uri(property.Value.GetString()); - continue; - } - if (property.NameEquals("state"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - state = new BatchCertificateState(property.Value.GetString()); - continue; - } - if (property.NameEquals("stateTransitionTime"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - stateTransitionTime = property.Value.GetDateTimeOffset("O"); - continue; - } - if (property.NameEquals("previousState"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - previousState = new BatchCertificateState(property.Value.GetString()); - continue; - } - if (property.NameEquals("previousStateTransitionTime"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - previousStateTransitionTime = property.Value.GetDateTimeOffset("O"); - continue; - } - if (property.NameEquals("publicData"u8)) - { - publicData = property.Value.GetString(); - continue; - } - if (property.NameEquals("deleteCertificateError"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - deleteCertificateError = BatchCertificateDeleteError.DeserializeBatchCertificateDeleteError(property.Value, options); - continue; - } - if (property.NameEquals("data"u8)) - { - data = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); - continue; - } - if 
(property.NameEquals("certificateFormat"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - certificateFormat = new BatchCertificateFormat(property.Value.GetString()); - continue; - } - if (property.NameEquals("password"u8)) - { - password = property.Value.GetString(); - continue; - } - if (options.Format != "W") - { - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new BatchCertificate( - thumbprint, - thumbprintAlgorithm, - url, - state, - stateTransitionTime, - previousState, - previousStateTransitionTime, - publicData, - deleteCertificateError, - data, - certificateFormat, - password, - serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); - default: - throw new FormatException($"The model {nameof(BatchCertificate)} does not support writing '{options.Format}' format."); - } - } - - BatchCertificate IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchCertificate(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(BatchCertificate)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The response to deserialize the model from. 
- internal static BatchCertificate FromResponse(Response response) - { - using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchCertificate(document.RootElement); - } - - /// Convert into a . - internal virtual RequestContent ToRequestContent() - { - var content = new Utf8JsonRequestContent(); - content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); - return content; - } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs deleted file mode 100644 index 1f9cc653afbe..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace Azure.Compute.Batch -{ - /// - /// A Certificate that can be installed on Compute Nodes and can be used to - /// authenticate operations on the machine. - /// - public partial class BatchCertificate - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// The X.509 thumbprint of the Certificate. 
This is a sequence of up to 40 hex digits (it may include spaces but these are removed). - /// The algorithm used to derive the thumbprint. This must be sha1. - /// The base64-encoded contents of the Certificate. The maximum size is 10KB. - /// , or is null. - public BatchCertificate(string thumbprint, string thumbprintAlgorithm, BinaryData data) - { - Argument.AssertNotNull(thumbprint, nameof(thumbprint)); - Argument.AssertNotNull(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNull(data, nameof(data)); - - Thumbprint = thumbprint; - ThumbprintAlgorithm = thumbprintAlgorithm; - Data = data; - } - - /// Initializes a new instance of . - /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). - /// The algorithm used to derive the thumbprint. This must be sha1. - /// The URL of the Certificate. - /// The state of the Certificate. - /// The time at which the Certificate entered its current state. - /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. - /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. - /// The public part of the Certificate as a base-64 encoded .cer file. - /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. - /// The base64-encoded contents of the Certificate. The maximum size is 10KB. - /// The format of the Certificate data. - /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. - /// Keeps track of any properties unknown to the library. - internal BatchCertificate(string thumbprint, string thumbprintAlgorithm, Uri uri, BatchCertificateState? state, DateTimeOffset? stateTransitionTime, BatchCertificateState? 
previousState, DateTimeOffset? previousStateTransitionTime, string publicData, BatchCertificateDeleteError deleteCertificateError, BinaryData data, BatchCertificateFormat? certificateFormat, string password, IDictionary serializedAdditionalRawData) - { - Thumbprint = thumbprint; - ThumbprintAlgorithm = thumbprintAlgorithm; - Uri = uri; - State = state; - StateTransitionTime = stateTransitionTime; - PreviousState = previousState; - PreviousStateTransitionTime = previousStateTransitionTime; - PublicData = publicData; - DeleteCertificateError = deleteCertificateError; - Data = data; - CertificateFormat = certificateFormat; - Password = password; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal BatchCertificate() - { - } - - /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). - public string Thumbprint { get; set; } - /// The algorithm used to derive the thumbprint. This must be sha1. - public string ThumbprintAlgorithm { get; set; } - /// The URL of the Certificate. - public Uri Uri { get; } - /// The state of the Certificate. - public BatchCertificateState? State { get; } - /// The time at which the Certificate entered its current state. - public DateTimeOffset? StateTransitionTime { get; } - /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. - public BatchCertificateState? PreviousState { get; } - /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. - public DateTimeOffset? PreviousStateTransitionTime { get; } - /// The public part of the Certificate as a base-64 encoded .cer file. - public string PublicData { get; } - /// The error that occurred on the last attempt to delete this Certificate. 
This property is set only if the Certificate is in the DeleteFailed state. - public BatchCertificateDeleteError DeleteCertificateError { get; } - /// - /// The base64-encoded contents of the Certificate. The maximum size is 10KB. - /// - /// To assign a byte[] to this property use . - /// The byte[] will be serialized to a Base64 encoded string. - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) - /// Creates a payload of "AQID". - /// - /// - /// - /// - public BinaryData Data { get; set; } - /// The format of the Certificate data. - public BatchCertificateFormat? CertificateFormat { get; set; } - /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. - public string Password { get; set; } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.cs deleted file mode 100644 index a731bc507c15..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.cs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace Azure.Compute.Batch -{ - /// An error encountered by the Batch service when deleting a Certificate. - public partial class BatchCertificateDeleteError - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". 
- /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - internal BatchCertificateDeleteError() - { - Values = new ChangeTrackingList(); - } - - /// Initializes a new instance of . - /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. - /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. - /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. - /// Keeps track of any properties unknown to the library. - internal BatchCertificateDeleteError(string code, string message, IReadOnlyList values, IDictionary serializedAdditionalRawData) - { - Code = code; - Message = message; - Values = values; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. - public string Code { get; } - /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. - public string Message { get; } - /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. 
- public IReadOnlyList Values { get; } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateFormat.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateFormat.cs deleted file mode 100644 index 121b2cffc624..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateFormat.cs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// BatchCertificateFormat enums. - public readonly partial struct BatchCertificateFormat : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public BatchCertificateFormat(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string PfxValue = "pfx"; - private const string CerValue = "cer"; - - /// The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain. - public static BatchCertificateFormat Pfx { get; } = new BatchCertificateFormat(PfxValue); - /// The Certificate is a base64-encoded X.509 Certificate. - public static BatchCertificateFormat Cer { get; } = new BatchCertificateFormat(CerValue); - /// Determines if two values are the same. - public static bool operator ==(BatchCertificateFormat left, BatchCertificateFormat right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(BatchCertificateFormat left, BatchCertificateFormat right) => !left.Equals(right); - /// Converts a to a . 
- public static implicit operator BatchCertificateFormat(string value) => new BatchCertificateFormat(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is BatchCertificateFormat other && Equals(other); - /// - public bool Equals(BatchCertificateFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.Serialization.cs deleted file mode 100644 index 81db242c4548..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.Serialization.cs +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; -using Azure.Core; - -namespace Azure.Compute.Batch -{ - public partial class BatchCertificateReference : IUtf8JsonSerializable, IJsonModel - { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - writer.WriteStartObject(); - JsonModelWriteCore(writer, options); - writer.WriteEndObject(); - } - - /// The JSON writer. - /// The client options for reading and writing models. - protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support writing '{format}' format."); - } - - writer.WritePropertyName("thumbprint"u8); - writer.WriteStringValue(Thumbprint); - writer.WritePropertyName("thumbprintAlgorithm"u8); - writer.WriteStringValue(ThumbprintAlgorithm); - if (Optional.IsDefined(StoreLocation)) - { - writer.WritePropertyName("storeLocation"u8); - writer.WriteStringValue(StoreLocation.Value.ToString()); - } - if (Optional.IsDefined(StoreName)) - { - writer.WritePropertyName("storeName"u8); - writer.WriteStringValue(StoreName); - } - if (Optional.IsCollectionDefined(Visibility)) - { - writer.WritePropertyName("visibility"u8); - writer.WriteStartArray(); - foreach (var item in Visibility) - { - writer.WriteStringValue(item.ToString()); - } - writer.WriteEndArray(); - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - } - - BatchCertificateReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchCertificateReference(document.RootElement, options); - } - - internal static BatchCertificateReference DeserializeBatchCertificateReference(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string thumbprint = default; - string thumbprintAlgorithm = default; - BatchCertificateStoreLocation? storeLocation = default; - string storeName = default; - IList visibility = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("thumbprint"u8)) - { - thumbprint = property.Value.GetString(); - continue; - } - if (property.NameEquals("thumbprintAlgorithm"u8)) - { - thumbprintAlgorithm = property.Value.GetString(); - continue; - } - if (property.NameEquals("storeLocation"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - storeLocation = new BatchCertificateStoreLocation(property.Value.GetString()); - continue; - } - if (property.NameEquals("storeName"u8)) - { - storeName = property.Value.GetString(); - continue; - } - if (property.NameEquals("visibility"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(new BatchCertificateVisibility(item.GetString())); - } - visibility = array; - continue; - } - if (options.Format != "W") - { - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); 
- } - } - serializedAdditionalRawData = rawDataDictionary; - return new BatchCertificateReference( - thumbprint, - thumbprintAlgorithm, - storeLocation, - storeName, - visibility ?? new ChangeTrackingList(), - serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); - default: - throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support writing '{options.Format}' format."); - } - } - - BatchCertificateReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchCertificateReference(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The response to deserialize the model from. - internal static BatchCertificateReference FromResponse(Response response) - { - using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchCertificateReference(document.RootElement); - } - - /// Convert into a . 
- internal virtual RequestContent ToRequestContent() - { - var content = new Utf8JsonRequestContent(); - content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); - return content; - } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.cs deleted file mode 100644 index 5255ea9eff66..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.cs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace Azure.Compute.Batch -{ - /// A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - public partial class BatchCertificateReference - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - /// The thumbprint of the Certificate. - /// The algorithm with which the thumbprint is associated. This must be sha1. - /// or is null. 
- public BatchCertificateReference(string thumbprint, string thumbprintAlgorithm) - { - Argument.AssertNotNull(thumbprint, nameof(thumbprint)); - Argument.AssertNotNull(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - - Thumbprint = thumbprint; - ThumbprintAlgorithm = thumbprintAlgorithm; - Visibility = new ChangeTrackingList(); - } - - /// Initializes a new instance of . - /// The thumbprint of the Certificate. - /// The algorithm with which the thumbprint is associated. This must be sha1. - /// The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// The name of the Certificate store on the Compute Node into which to install the Certificate. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. - /// Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts. - /// Keeps track of any properties unknown to the library. 
- internal BatchCertificateReference(string thumbprint, string thumbprintAlgorithm, BatchCertificateStoreLocation? storeLocation, string storeName, IList visibility, IDictionary serializedAdditionalRawData) - { - Thumbprint = thumbprint; - ThumbprintAlgorithm = thumbprintAlgorithm; - StoreLocation = storeLocation; - StoreName = storeName; - Visibility = visibility; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// Initializes a new instance of for deserialization. - internal BatchCertificateReference() - { - } - - /// The thumbprint of the Certificate. - public string Thumbprint { get; set; } - /// The algorithm with which the thumbprint is associated. This must be sha1. - public string ThumbprintAlgorithm { get; set; } - /// The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - public BatchCertificateStoreLocation? StoreLocation { get; set; } - /// The name of the Certificate store on the Compute Node into which to install the Certificate. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). 
Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. - public string StoreName { get; set; } - /// Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts. - public IList Visibility { get; } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateState.cs deleted file mode 100644 index 59674caeb91c..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateState.cs +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// BatchCertificateState enums. - public readonly partial struct BatchCertificateState : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public BatchCertificateState(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string ActiveValue = "active"; - private const string DeletingValue = "deleting"; - private const string DeleteFailedValue = "deletefailed"; - - /// The Certificate is available for use in Pools. - public static BatchCertificateState Active { get; } = new BatchCertificateState(ActiveValue); - /// The user has requested that the Certificate be deleted, but the delete operation has not yet completed. You may not reference the Certificate when creating or updating Pools. 
- public static BatchCertificateState Deleting { get; } = new BatchCertificateState(DeletingValue); - /// The user requested that the Certificate be deleted, but there are Pools that still have references to the Certificate, or it is still installed on one or more Nodes. (The latter can occur if the Certificate has been removed from the Pool, but the Compute Node has not yet restarted. Compute Nodes refresh their Certificates only when they restart.) You may use the cancel Certificate delete operation to cancel the delete, or the delete Certificate operation to retry the delete. - public static BatchCertificateState DeleteFailed { get; } = new BatchCertificateState(DeleteFailedValue); - /// Determines if two values are the same. - public static bool operator ==(BatchCertificateState left, BatchCertificateState right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(BatchCertificateState left, BatchCertificateState right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator BatchCertificateState(string value) => new BatchCertificateState(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is BatchCertificateState other && Equals(other); - /// - public bool Equals(BatchCertificateState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateStoreLocation.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateStoreLocation.cs deleted file mode 100644 index 191a6833d3f3..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateStoreLocation.cs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// BatchCertificateStoreLocation enums. - public readonly partial struct BatchCertificateStoreLocation : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public BatchCertificateStoreLocation(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string CurrentUserValue = "currentuser"; - private const string LocalMachineValue = "localmachine"; - - /// Certificates should be installed to the CurrentUser Certificate store. - public static BatchCertificateStoreLocation CurrentUser { get; } = new BatchCertificateStoreLocation(CurrentUserValue); - /// Certificates should be installed to the LocalMachine Certificate store. - public static BatchCertificateStoreLocation LocalMachine { get; } = new BatchCertificateStoreLocation(LocalMachineValue); - /// Determines if two values are the same. - public static bool operator ==(BatchCertificateStoreLocation left, BatchCertificateStoreLocation right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(BatchCertificateStoreLocation left, BatchCertificateStoreLocation right) => !left.Equals(right); - /// Converts a to a . 
- public static implicit operator BatchCertificateStoreLocation(string value) => new BatchCertificateStoreLocation(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is BatchCertificateStoreLocation other && Equals(other); - /// - public bool Equals(BatchCertificateStoreLocation other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateVisibility.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateVisibility.cs deleted file mode 100644 index ecd194378a4a..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateVisibility.cs +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// BatchCertificateVisibility enums. - public readonly partial struct BatchCertificateVisibility : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public BatchCertificateVisibility(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string StartTaskValue = "starttask"; - private const string TaskValue = "task"; - private const string RemoteUserValue = "remoteuser"; - - /// The Certificate should be visible to the user account under which the StartTask is run. Note that if AutoUser Scope is Pool for both the StartTask and a Task, this certificate will be visible to the Task as well. 
- public static BatchCertificateVisibility StartTask { get; } = new BatchCertificateVisibility(StartTaskValue); - /// The Certificate should be visible to the user accounts under which Job Tasks are run. - public static BatchCertificateVisibility Task { get; } = new BatchCertificateVisibility(TaskValue); - /// The Certificate should be visible to the user accounts under which users remotely access the Compute Node. - public static BatchCertificateVisibility RemoteUser { get; } = new BatchCertificateVisibility(RemoteUserValue); - /// Determines if two values are the same. - public static bool operator ==(BatchCertificateVisibility left, BatchCertificateVisibility right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(BatchCertificateVisibility left, BatchCertificateVisibility right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator BatchCertificateVisibility(string value) => new BatchCertificateVisibility(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is BatchCertificateVisibility other && Equals(other); - /// - public bool Equals(BatchCertificateVisibility other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs index b8c2f9928972..2f58dc434cd8 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs @@ -2794,458 +2794,6 @@ public virtual Response GetJobTaskCounts(string jobId, TimeSpan? 
timeOutInSecond } } - /// Creates a Certificate to the specified Account. - /// The Certificate to be created. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The cancellation token to use. - /// is null. - /// - public virtual async Task CreateCertificateAsync(BatchCertificate certificate, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNull(certificate, nameof(certificate)); - - using RequestContent content = certificate.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateCertificateAsync(content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); - return response; - } - - /// Creates a Certificate to the specified Account. - /// The Certificate to be created. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The cancellation token to use. - /// is null. - /// - public virtual Response CreateCertificate(BatchCertificate certificate, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNull(certificate, nameof(certificate)); - - using RequestContent content = certificate.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateCertificate(content, timeOutInSeconds, ocpDate, context); - return response; - } - - /// - /// [Protocol Method] Creates a Certificate to the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The content to send as the body of the request. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual async Task CreateCertificateAsync(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) - { - Argument.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateCertificate"); - scope.Start(); - try - { - using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpDate, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Creates a Certificate to the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The content to send as the body of the request. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual Response CreateCertificate(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) - { - Argument.AssertNotNull(content, nameof(content)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateCertificate"); - scope.Start(); - try - { - using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpDate, context); - return _pipeline.ProcessMessage(message, context); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Cancels a failed deletion of a Certificate from the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate being deleted. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual async Task CancelCertificateDeletionAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.CancelCertificateDeletion"); - scope.Start(); - try - { - using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Cancels a failed deletion of a Certificate from the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate being deleted. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual Response CancelCertificateDeletion(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.CancelCertificateDeletion"); - scope.Start(); - try - { - using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); - return _pipeline.ProcessMessage(message, context); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Deletes a Certificate from the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to be deleted. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - internal virtual async Task DeleteCertificateInternalAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificateInternal"); - scope.Start(); - try - { - using HttpMessage message = CreateDeleteCertificateInternalRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Deletes a Certificate from the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to be deleted. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - internal virtual Response DeleteCertificateInternal(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificateInternal"); - scope.Start(); - try - { - using HttpMessage message = CreateDeleteCertificateInternalRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); - return _pipeline.ProcessMessage(message, context); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// Gets information about the specified Certificate. - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to get. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// An OData $select clause. - /// The cancellation token to use. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetCertificateAsync(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context).ConfigureAwait(false); - return Response.FromValue(BatchCertificate.FromResponse(response), response); - } - - /// Gets information about the specified Certificate. - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to get. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// An OData $select clause. - /// The cancellation token to use. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetCertificate(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context); - return Response.FromValue(BatchCertificate.FromResponse(response), response); - } - - /// - /// [Protocol Method] Gets information about the specified Certificate. 
- /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to get. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// An OData $select clause. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual async Task GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetCertificate"); - scope.Start(); - try - { - using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// - /// [Protocol Method] Gets information about the specified Certificate. 
- /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The algorithm used to derive the thumbprint parameter. This must be sha1. - /// The thumbprint of the Certificate to get. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// An OData $select clause. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, IEnumerable select, RequestContext context) - { - Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); - Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetCertificate"); - scope.Start(); - try - { - using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context); - return _pipeline.ProcessMessage(message, context); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// /// [Protocol Method] Deletes a Job Schedule from the specified Account. @@ -8000,7 +7548,7 @@ public virtual Pageable GetPoolUsageMetrics(TimeSpan? timeOutInSecon return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); } - /// Lists all of the Pools which be mounted. + /// Lists all of the Pools in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -8027,7 +7575,7 @@ public virtual AsyncPageable GetPoolsAsync(TimeSpan? timeOutInSeconds return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPool.DeserializeBatchPool(e), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); } - /// Lists all of the Pools which be mounted. + /// Lists all of the Pools in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -8055,7 +7603,7 @@ public virtual Pageable GetPools(TimeSpan? timeOutInSeconds = null, D } /// - /// [Protocol Method] Lists all of the Pools which be mounted. + /// [Protocol Method] Lists all of the Pools in the specified Account. /// /// /// @@ -8097,7 +7645,7 @@ public virtual AsyncPageable GetPoolsAsync(TimeSpan? timeOutInSecond } /// - /// [Protocol Method] Lists all of the Pools which be mounted. + /// [Protocol Method] Lists all of the Pools in the specified Account. /// /// /// @@ -8884,140 +8432,6 @@ public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(stri return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); } - /// Lists all of the Certificates that have been added to the specified Account. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// - /// The maximum number of items to return in the response. A maximum of 1000 - /// applications can be returned. - /// - /// - /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. - /// - /// An OData $select clause. - /// The cancellation token to use. - /// - public virtual AsyncPageable GetCertificatesAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? 
maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) - { - RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); - return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchCertificate.DeserializeBatchCertificate(e), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); - } - - /// Lists all of the Certificates that have been added to the specified Account. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// - /// The maximum number of items to return in the response. A maximum of 1000 - /// applications can be returned. - /// - /// - /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. - /// - /// An OData $select clause. - /// The cancellation token to use. - /// - public virtual Pageable GetCertificates(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) - { - RequestContext context = cancellationToken.CanBeCanceled ? 
new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); - return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchCertificate.DeserializeBatchCertificate(e), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); - } - - /// - /// [Protocol Method] Lists all of the Certificates that have been added to the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// - /// The maximum number of items to return in the response. A maximum of 1000 - /// applications can be returned. - /// - /// - /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. - /// - /// An OData $select clause. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. - /// The from the service containing a list of objects. 
Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetCertificatesAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) - { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); - return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); - } - - /// - /// [Protocol Method] Lists all of the Certificates that have been added to the specified Account. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// - /// - /// - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// - /// The maximum number of items to return in the response. A maximum of 1000 - /// applications can be returned. - /// - /// - /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. - /// - /// An OData $select clause. 
- /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// Service returned a non-success status code. - /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetCertificates(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) - { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); - return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); - } - /// Lists all of the Job Schedules in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// @@ -11042,153 +10456,6 @@ internal HttpMessage CreateGetJobTaskCountsRequest(string jobId, TimeSpan? timeO return message; } - internal HttpMessage CreateCreateCertificateRequest(RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) - { - var message = _pipeline.CreateMessage(context, ResponseClassifier201); - var request = message.Request; - request.Method = RequestMethod.Post; - var uri = new RawRequestUriBuilder(); - uri.Reset(_endpoint); - uri.AppendPath("/certificates", false); - uri.AppendQuery("api-version", _apiVersion, true); - if (timeOutInSeconds != null) - { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); - } - request.Uri = uri; - request.Headers.Add("client-request-id", message.Request.ClientRequestId); - request.Headers.Add("return-client-request-id", "true"); - if (ocpDate != null) - { - request.Headers.Add("ocp-date", ocpDate.Value, "R"); - } - request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); - request.Content = content; - return message; - } - - internal HttpMessage CreateGetCertificatesRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) - { - var message = _pipeline.CreateMessage(context, ResponseClassifier200); - var request = message.Request; - request.Method = RequestMethod.Get; - var uri = new RawRequestUriBuilder(); - uri.Reset(_endpoint); - uri.AppendPath("/certificates", false); - uri.AppendQuery("api-version", _apiVersion, true); - if (timeOutInSeconds != null) - { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); - } - if (maxresults != null) - { - uri.AppendQuery("maxresults", maxresults.Value, true); - } - if (filter != null) - { - uri.AppendQuery("$filter", filter, true); - } - if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) - { - uri.AppendQueryDelimited("$select", select, ",", true); - } - request.Uri = uri; - request.Headers.Add("Accept", "application/json"); - request.Headers.Add("client-request-id", message.Request.ClientRequestId); - request.Headers.Add("return-client-request-id", "true"); - if (ocpDate != null) - { - 
request.Headers.Add("ocp-date", ocpDate.Value, "R"); - } - return message; - } - - internal HttpMessage CreateCancelCertificateDeletionRequest(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) - { - var message = _pipeline.CreateMessage(context, ResponseClassifier204); - var request = message.Request; - request.Method = RequestMethod.Post; - var uri = new RawRequestUriBuilder(); - uri.Reset(_endpoint); - uri.AppendPath("/certificates(thumbprintAlgorithm=", false); - uri.AppendPath(thumbprintAlgorithm, true); - uri.AppendPath(",thumbprint=", false); - uri.AppendPath(thumbprint, true); - uri.AppendPath(")/canceldelete", false); - uri.AppendQuery("api-version", _apiVersion, true); - if (timeOutInSeconds != null) - { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); - } - request.Uri = uri; - request.Headers.Add("client-request-id", message.Request.ClientRequestId); - request.Headers.Add("return-client-request-id", "true"); - if (ocpDate != null) - { - request.Headers.Add("ocp-date", ocpDate.Value, "R"); - } - return message; - } - - internal HttpMessage CreateDeleteCertificateInternalRequest(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) - { - var message = _pipeline.CreateMessage(context, ResponseClassifier202); - var request = message.Request; - request.Method = RequestMethod.Delete; - var uri = new RawRequestUriBuilder(); - uri.Reset(_endpoint); - uri.AppendPath("/certificates(thumbprintAlgorithm=", false); - uri.AppendPath(thumbprintAlgorithm, true); - uri.AppendPath(",thumbprint=", false); - uri.AppendPath(thumbprint, true); - uri.AppendPath(")", false); - uri.AppendQuery("api-version", _apiVersion, true); - if (timeOutInSeconds != null) - { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); - } - request.Uri = uri; - request.Headers.Add("client-request-id", message.Request.ClientRequestId); - request.Headers.Add("return-client-request-id", "true"); - if (ocpDate != null) - { - request.Headers.Add("ocp-date", ocpDate.Value, "R"); - } - return message; - } - - internal HttpMessage CreateGetCertificateRequest(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, IEnumerable select, RequestContext context) - { - var message = _pipeline.CreateMessage(context, ResponseClassifier200); - var request = message.Request; - request.Method = RequestMethod.Get; - var uri = new RawRequestUriBuilder(); - uri.Reset(_endpoint); - uri.AppendPath("/certificates(thumbprintAlgorithm=", false); - uri.AppendPath(thumbprintAlgorithm, true); - uri.AppendPath(",thumbprint=", false); - uri.AppendPath(thumbprint, true); - uri.AppendPath(")", false); - uri.AppendQuery("api-version", _apiVersion, true); - if (timeOutInSeconds != null) - { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); - } - if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) - { - uri.AppendQueryDelimited("$select", select, ",", true); - } - request.Uri = uri; - request.Headers.Add("Accept", "application/json"); - request.Headers.Add("client-request-id", message.Request.ClientRequestId); - request.Headers.Add("return-client-request-id", "true"); - if (ocpDate != null) - { - request.Headers.Add("ocp-date", ocpDate.Value, "R"); - } - return message; - } - internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200404); @@ -12681,25 +11948,6 @@ internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesNextPageReques return message; } - internal HttpMessage CreateGetCertificatesNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? 
maxresults, string filter, IEnumerable select, RequestContext context) - { - var message = _pipeline.CreateMessage(context, ResponseClassifier200); - var request = message.Request; - request.Method = RequestMethod.Get; - var uri = new RawRequestUriBuilder(); - uri.Reset(_endpoint); - uri.AppendRawNextLink(nextLink, false); - request.Uri = uri; - request.Headers.Add("Accept", "application/json"); - request.Headers.Add("client-request-id", message.Request.ClientRequestId); - request.Headers.Add("return-client-request-id", "true"); - if (ocpDate != null) - { - request.Headers.Add("ocp-date", ocpDate.Value, "R"); - } - return message; - } - internal HttpMessage CreateGetJobSchedulesNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs index c4f6c163cc57..8e30c53c5d78 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs @@ -13,13 +13,13 @@ namespace Azure.Compute.Batch /// Client options for BatchClient. public partial class BatchClientOptions : ClientOptions { - private const ServiceVersion LatestVersion = ServiceVersion.V2024_07_01_20_0; + private const ServiceVersion LatestVersion = ServiceVersion.V2025_06_01; /// The version of the service to use. public enum ServiceVersion { - /// Service version "2024-07-01.20.0". - V2024_07_01_20_0 = 1, + /// Service version "2025-06-01". 
+ V2025_06_01 = 1, } internal string Version { get; } @@ -29,7 +29,7 @@ public BatchClientOptions(ServiceVersion version = LatestVersion) { Version = version switch { - ServiceVersion.V2024_07_01_20_0 => "2024-07-01.20.0", + ServiceVersion.V2025_06_01 => "2025-06-01", _ => throw new NotSupportedException() }; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs index 9be8090d6d21..f16107f4ad19 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs @@ -70,8 +70,8 @@ public BatchJob(BatchPoolInfo poolInfo) /// The previous state of the Job. This property is not set if the Job is in its initial Active state. /// The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. 
Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. /// The execution constraints for the Job. /// Details of a Job Manager Task to be launched when the Job is started. /// The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. @@ -80,7 +80,7 @@ public BatchJob(BatchPoolInfo poolInfo) /// The Pool settings associated with the Job. /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// The execution information for the Job. 
/// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. @@ -145,9 +145,9 @@ internal BatchJob() public DateTimeOffset? PreviousStateTransitionTime { get; } /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. public int? Priority { get; set; } - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. public bool? AllowTaskPreemption { get; set; } - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. 
Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. public int? MaxParallelTasks { get; set; } /// The execution constraints for the Job. public BatchJobConstraints Constraints { get; set; } @@ -165,7 +165,7 @@ internal BatchJob() public BatchAllTasksCompleteMode? AllTasksCompleteMode { get; set; } /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. public BatchTaskFailureMode? TaskFailureMode { get; } - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. public BatchJobNetworkConfiguration NetworkConfiguration { get; } /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. public IList Metadata { get; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.cs index 36c63ea63d24..0900db21c961 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.cs @@ -65,8 +65,8 @@ public BatchJobCreateOptions(string id, BatchPoolInfo poolInfo) /// The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. 
/// Whether Tasks in the Job can define dependencies on each other. The default is false. /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. /// The execution constraints for the Job. /// Details of a Job Manager Task to be launched when the Job is started. 
If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. /// The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. @@ -75,7 +75,7 @@ public BatchJobCreateOptions(string id, BatchPoolInfo poolInfo) /// The Pool on which the Batch service runs the Job's Tasks. /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. 
The default is noaction. - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// Keeps track of any properties unknown to the library. internal BatchJobCreateOptions(string id, string displayName, bool? usesTaskDependencies, int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IList commonEnvironmentSettings, BatchPoolInfo poolInfo, BatchAllTasksCompleteMode? allTasksCompleteMode, BatchTaskFailureMode? taskFailureMode, BatchJobNetworkConfiguration networkConfiguration, IList metadata, IDictionary serializedAdditionalRawData) @@ -112,9 +112,9 @@ internal BatchJobCreateOptions() public bool? UsesTaskDependencies { get; set; } /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. public int? Priority { get; set; } - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. 
You can update a job's allowTaskPreemption after it has been created using the update job API. public bool? AllowTaskPreemption { get; set; } - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. public int? MaxParallelTasks { get; set; } /// The execution constraints for the Job. public BatchJobConstraints Constraints { get; set; } @@ -132,7 +132,7 @@ internal BatchJobCreateOptions() public BatchAllTasksCompleteMode? AllTasksCompleteMode { get; set; } /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. public BatchTaskFailureMode? TaskFailureMode { get; set; } - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } /// A list of name-value pairs associated with the Job as metadata. 
The Batch service does not assign any meaning to metadata; it is solely for the use of user code. public IList Metadata { get; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDefaultOrder.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDefaultOrder.cs new file mode 100644 index 000000000000..056d57ed7081 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDefaultOrder.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchJobDefaultOrder enums. + public readonly partial struct BatchJobDefaultOrder : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchJobDefaultOrder(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NoneValue = "none"; + private const string CreationTimeValue = "creationtime"; + + /// Tasks should be scheduled uniformly from all equal-priority jobs for the pool. + public static BatchJobDefaultOrder None { get; } = new BatchJobDefaultOrder(NoneValue); + /// If jobs have equal priority, tasks from jobs that were created earlier should be scheduled first. + public static BatchJobDefaultOrder CreationTime { get; } = new BatchJobDefaultOrder(CreationTimeValue); + /// Determines if two values are the same. + public static bool operator ==(BatchJobDefaultOrder left, BatchJobDefaultOrder right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchJobDefaultOrder left, BatchJobDefaultOrder right) => !left.Equals(right); + /// Converts a to a . 
+ public static implicit operator BatchJobDefaultOrder(string value) => new BatchJobDefaultOrder(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchJobDefaultOrder other && Equals(other); + /// + public bool Equals(BatchJobDefaultOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs index ca6247e021c5..6a1fd9980677 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs @@ -10,7 +10,7 @@ namespace Azure.Compute.Batch { - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. public partial class BatchJobNetworkConfiguration { /// diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs index ed0457802812..ca10c8be85e9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs @@ -59,13 +59,13 @@ public BatchJobSpecification(BatchPoolInfo poolInfo) /// Initializes a new instance of . /// The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. 
This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created using by using the update Job API. - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. /// The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// Whether Tasks in the Job can define dependencies on each other. The default is false. 
/// The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. /// The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. /// The execution constraints for Jobs created under this schedule. /// The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. /// The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. @@ -101,9 +101,9 @@ internal BatchJobSpecification() /// The priority of Jobs created under this schedule. 
Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created using by using the update Job API. public int? Priority { get; set; } - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. public bool? AllowTaskPreemption { get; set; } - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. public int? 
MaxParallelTasks { get; set; } /// The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. public string DisplayName { get; set; } @@ -113,7 +113,7 @@ internal BatchJobSpecification() public BatchAllTasksCompleteMode? AllTasksCompleteMode { get; set; } /// The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. public BatchTaskFailureMode? TaskFailureMode { get; set; } - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } /// The execution constraints for Jobs created under this schedule. public BatchJobConstraints Constraints { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateOptions.cs index 4fcc574eafcf..827913289542 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateOptions.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateOptions.cs @@ -53,13 +53,13 @@ public BatchJobUpdateOptions() /// Initializes a new instance of . /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. - /// Whether Tasks in this job can be preempted by other high priority jobs. 
If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. /// The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. /// The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. 
If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. /// The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. /// Keeps track of any properties unknown to the library. internal BatchJobUpdateOptions(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchPoolInfo poolInfo, BatchAllTasksCompleteMode? allTasksCompleteMode, IList metadata, BatchJobNetworkConfiguration networkConfiguration, IDictionary serializedAdditionalRawData) { @@ -76,9 +76,9 @@ internal BatchJobUpdateOptions(int? priority, bool? allowTaskPreemption, int? ma /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. public int? Priority { get; set; } - /// Whether Tasks in this job can be preempted by other high priority jobs. 
If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. public bool? AllowTaskPreemption { get; set; } - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. public int? MaxParallelTasks { get; set; } /// The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. public BatchJobConstraints Constraints { get; set; } @@ -88,7 +88,7 @@ internal BatchJobUpdateOptions(int? priority, bool? allowTaskPreemption, int? ma public BatchAllTasksCompleteMode? AllTasksCompleteMode { get; set; } /// A list of name-value pairs associated with the Job as metadata. 
If omitted, the existing Job metadata is left unchanged. public IList Metadata { get; } - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs index abee99c2949c..e8bb91573ea0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs @@ -75,6 +75,11 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("ipAddress"u8); writer.WriteStringValue(IpAddress.ToString()); } + if (Optional.IsDefined(Ipv6Address)) + { + writer.WritePropertyName("ipv6Address"u8); + writer.WriteStringValue(Ipv6Address.ToString()); + } if (Optional.IsDefined(AffinityId)) { writer.WritePropertyName("affinityId"u8); @@ -125,16 +130,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTaskInfo"u8); writer.WriteObjectValue(StartTaskInfo, options); } - if (Optional.IsCollectionDefined(CertificateReferences)) - { - writer.WritePropertyName("certificateReferences"u8); - writer.WriteStartArray(); - foreach (var item in CertificateReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } if (Optional.IsCollectionDefined(Errors)) { writer.WritePropertyName("errors"u8); @@ -210,6 +205,7 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW DateTimeOffset? lastBootTime = default; DateTimeOffset? allocationTime = default; IPAddress ipAddress = default; + IPAddress ipv6Address = default; string affinityId = default; string vmSize = default; int? 
totalTasksRun = default; @@ -219,7 +215,6 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW IReadOnlyList recentTasks = default; BatchStartTask startTask = default; BatchStartTaskInfo startTaskInfo = default; - IReadOnlyList certificateReferences = default; IReadOnlyList errors = default; bool? isDedicated = default; BatchNodeEndpointConfiguration endpointConfiguration = default; @@ -297,6 +292,15 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW ipAddress = IPAddress.Parse(property.Value.GetString()); continue; } + if (property.NameEquals("ipv6Address"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + ipv6Address = IPAddress.Parse(property.Value.GetString()); + continue; + } if (property.NameEquals("affinityId"u8)) { affinityId = property.Value.GetString(); @@ -375,20 +379,6 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW startTaskInfo = BatchStartTaskInfo.DeserializeBatchStartTaskInfo(property.Value, options); continue; } - if (property.NameEquals("certificateReferences"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); - } - certificateReferences = array; - continue; - } if (property.NameEquals("errors"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -454,6 +444,7 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW lastBootTime, allocationTime, ipAddress, + ipv6Address, affinityId, vmSize, totalTasksRun, @@ -463,7 +454,6 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW recentTasks ?? new ChangeTrackingList(), startTask, startTaskInfo, - certificateReferences ?? new ChangeTrackingList(), errors ?? 
new ChangeTrackingList(), isDedicated, endpointConfiguration, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs index 45af2e61763d..8b9c1a0ede6e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs @@ -50,7 +50,6 @@ public partial class BatchNode internal BatchNode() { RecentTasks = new ChangeTrackingList(); - CertificateReferences = new ChangeTrackingList(); Errors = new ChangeTrackingList(); } @@ -63,6 +62,7 @@ internal BatchNode() /// The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. /// The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. + /// The IPv6 address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. This property will not be present if the Pool is not configured for IPv6. /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. 
/// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. @@ -72,19 +72,13 @@ internal BatchNode() /// A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. /// The Task specified to run on the Compute Node as it joins the Pool. /// Runtime information about the execution of the StartTask on the Compute Node. - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of errors that are currently being encountered by the Compute Node. /// Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. /// The endpoint configuration for the Compute Node. /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. /// Info about the current state of the virtual machine. 
/// Keeps track of any properties unknown to the library. - internal BatchNode(string id, Uri uri, BatchNodeState? state, SchedulingState? schedulingState, DateTimeOffset? stateTransitionTime, DateTimeOffset? lastBootTime, DateTimeOffset? allocationTime, IPAddress ipAddress, string affinityId, string vmSize, int? totalTasksRun, int? runningTasksCount, int? runningTaskSlotsCount, int? totalTasksSucceeded, IReadOnlyList recentTasks, BatchStartTask startTask, BatchStartTaskInfo startTaskInfo, IReadOnlyList certificateReferences, IReadOnlyList errors, bool? isDedicated, BatchNodeEndpointConfiguration endpointConfiguration, BatchNodeAgentInfo nodeAgentInfo, VirtualMachineInfo virtualMachineInfo, IDictionary serializedAdditionalRawData) + internal BatchNode(string id, Uri uri, BatchNodeState? state, SchedulingState? schedulingState, DateTimeOffset? stateTransitionTime, DateTimeOffset? lastBootTime, DateTimeOffset? allocationTime, IPAddress ipAddress, IPAddress ipv6Address, string affinityId, string vmSize, int? totalTasksRun, int? runningTasksCount, int? runningTaskSlotsCount, int? totalTasksSucceeded, IReadOnlyList recentTasks, BatchStartTask startTask, BatchStartTaskInfo startTaskInfo, IReadOnlyList errors, bool? isDedicated, BatchNodeEndpointConfiguration endpointConfiguration, BatchNodeAgentInfo nodeAgentInfo, VirtualMachineInfo virtualMachineInfo, IDictionary serializedAdditionalRawData) { Id = id; Uri = uri; @@ -94,6 +88,7 @@ internal BatchNode(string id, Uri uri, BatchNodeState? state, SchedulingState? s LastBootTime = lastBootTime; AllocationTime = allocationTime; IpAddress = ipAddress; + Ipv6Address = ipv6Address; AffinityId = affinityId; VmSize = vmSize; TotalTasksRun = totalTasksRun; @@ -103,7 +98,6 @@ internal BatchNode(string id, Uri uri, BatchNodeState? state, SchedulingState? 
s RecentTasks = recentTasks; StartTask = startTask; StartTaskInfo = startTaskInfo; - CertificateReferences = certificateReferences; Errors = errors; IsDedicated = isDedicated; EndpointConfiguration = endpointConfiguration; @@ -128,6 +122,8 @@ internal BatchNode(string id, Uri uri, BatchNodeState? state, SchedulingState? s public DateTimeOffset? AllocationTime { get; } /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. public IPAddress IpAddress { get; } + /// The IPv6 address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. This property will not be present if the Pool is not configured for IPv6. + public IPAddress Ipv6Address { get; } /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. public string AffinityId { get; } /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). @@ -146,13 +142,6 @@ internal BatchNode(string id, Uri uri, BatchNodeState? state, SchedulingState? s public BatchStartTask StartTask { get; } /// Runtime information about the execution of the StartTask on the Compute Node. 
public BatchStartTaskInfo StartTaskInfo { get; } - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// - public IReadOnlyList CertificateReferences { get; } /// The list of errors that are currently being encountered by the Compute Node. public IReadOnlyList Errors { get; } /// Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCommunicationMode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCommunicationMode.cs deleted file mode 100644 index 0b03bccb1fdb..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCommunicationMode.cs +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// BatchNodeCommunicationMode enums. - public readonly partial struct BatchNodeCommunicationMode : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public BatchNodeCommunicationMode(string value) - { - _value = value ?? 
throw new ArgumentNullException(nameof(value)); - } - - private const string DefaultValue = "default"; - private const string ClassicValue = "classic"; - private const string SimplifiedValue = "simplified"; - - /// The node communication mode is automatically set by the Batch service. - public static BatchNodeCommunicationMode Default { get; } = new BatchNodeCommunicationMode(DefaultValue); - /// Nodes using the classic communication mode require inbound TCP communication on ports 29876 and 29877 from the "BatchNodeManagement.{region}" service tag and outbound TCP communication on port 443 to the "Storage.region" and "BatchNodeManagement.{region}" service tags. - public static BatchNodeCommunicationMode Classic { get; } = new BatchNodeCommunicationMode(ClassicValue); - /// Nodes using the simplified communication mode require outbound TCP communication on port 443 to the "BatchNodeManagement.{region}" service tag. No open inbound ports are required. - public static BatchNodeCommunicationMode Simplified { get; } = new BatchNodeCommunicationMode(SimplifiedValue); - /// Determines if two values are the same. - public static bool operator ==(BatchNodeCommunicationMode left, BatchNodeCommunicationMode right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(BatchNodeCommunicationMode left, BatchNodeCommunicationMode right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator BatchNodeCommunicationMode(string value) => new BatchNodeCommunicationMode(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is BatchNodeCommunicationMode other && Equals(other); - /// - public bool Equals(BatchNodeCommunicationMode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs index 607941db187a..3069f0990911 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs @@ -35,6 +35,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit throw new FormatException($"The model {nameof(BatchNodeRemoteLoginSettings)} does not support writing '{format}' format."); } + if (Optional.IsDefined(Ipv6RemoteLoginIpAddress)) + { + writer.WritePropertyName("ipv6RemoteLoginIPAddress"u8); + writer.WriteStringValue(Ipv6RemoteLoginIpAddress.ToString()); + } + if (Optional.IsDefined(Ipv6RemoteLoginPort)) + { + writer.WritePropertyName("ipv6RemoteLoginPort"u8); + writer.WriteNumberValue(Ipv6RemoteLoginPort.Value); + } writer.WritePropertyName("remoteLoginIPAddress"u8); writer.WriteStringValue(RemoteLoginIpAddress.ToString()); writer.WritePropertyName("remoteLoginPort"u8); @@ -76,12 +86,32 @@ internal static BatchNodeRemoteLoginSettings DeserializeBatchNodeRemoteLoginSett { return null; } + IPAddress ipv6RemoteLoginIPAddress = default; + int? 
ipv6RemoteLoginPort = default; IPAddress remoteLoginIPAddress = default; int remoteLoginPort = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("ipv6RemoteLoginIPAddress"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + ipv6RemoteLoginIPAddress = IPAddress.Parse(property.Value.GetString()); + continue; + } + if (property.NameEquals("ipv6RemoteLoginPort"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + ipv6RemoteLoginPort = property.Value.GetInt32(); + continue; + } if (property.NameEquals("remoteLoginIPAddress"u8)) { remoteLoginIPAddress = IPAddress.Parse(property.Value.GetString()); @@ -98,7 +128,7 @@ internal static BatchNodeRemoteLoginSettings DeserializeBatchNodeRemoteLoginSett } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeRemoteLoginSettings(remoteLoginIPAddress, remoteLoginPort, serializedAdditionalRawData); + return new BatchNodeRemoteLoginSettings(ipv6RemoteLoginIPAddress, ipv6RemoteLoginPort, remoteLoginIPAddress, remoteLoginPort, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs index d76d4c1ac8b9..95fb5592e189 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs @@ -59,11 +59,15 @@ internal BatchNodeRemoteLoginSettings(IPAddress remoteLoginIpAddress, int remote } /// Initializes a new instance of . + /// The IPv6 address used for remote login to the Compute Node. + /// The IPv6 port used for remote login to the Compute Node. /// The IP address used for remote login to the Compute Node. 
/// The port used for remote login to the Compute Node. /// Keeps track of any properties unknown to the library. - internal BatchNodeRemoteLoginSettings(IPAddress remoteLoginIpAddress, int remoteLoginPort, IDictionary serializedAdditionalRawData) + internal BatchNodeRemoteLoginSettings(IPAddress ipv6RemoteLoginIpAddress, int? ipv6RemoteLoginPort, IPAddress remoteLoginIpAddress, int remoteLoginPort, IDictionary serializedAdditionalRawData) { + Ipv6RemoteLoginIpAddress = ipv6RemoteLoginIpAddress; + Ipv6RemoteLoginPort = ipv6RemoteLoginPort; RemoteLoginIpAddress = remoteLoginIpAddress; RemoteLoginPort = remoteLoginPort; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -74,6 +78,10 @@ internal BatchNodeRemoteLoginSettings() { } + /// The IPv6 address used for remote login to the Compute Node. + public IPAddress Ipv6RemoteLoginIpAddress { get; } + /// The IPv6 port used for remote login to the Compute Node. + public int? Ipv6RemoteLoginPort { get; } /// The IP address used for remote login to the Compute Node. public IPAddress RemoteLoginIpAddress { get; } /// The port used for remote login to the Compute Node. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs index 888a8ffa9b57..191e9efe0175 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs @@ -109,17 +109,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } - if (options.Format != "W" && Optional.IsCollectionDefined(ResourceTags)) - { - writer.WritePropertyName("resourceTags"u8); - writer.WriteStartObject(); - foreach (var item in ResourceTags) - { - writer.WritePropertyName(item.Key); - writer.WriteStringValue(item.Value); - } - writer.WriteEndObject(); - } if (options.Format != "W" && Optional.IsDefined(CurrentDedicatedNodes)) { writer.WritePropertyName("currentDedicatedNodes"u8); @@ -175,16 +164,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } - if (options.Format != "W" && Optional.IsCollectionDefined(CertificateReferences)) - { - writer.WritePropertyName("certificateReferences"u8); - writer.WriteStartArray(); - foreach (var item in CertificateReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } if (options.Format != "W" && Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -245,16 +224,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("identity"u8); writer.WriteObjectValue(Identity, options); } - if (Optional.IsDefined(TargetNodeCommunicationMode)) - { - writer.WritePropertyName("targetNodeCommunicationMode"u8); - writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); - } - if (options.Format != "W" && 
Optional.IsDefined(CurrentNodeCommunicationMode)) - { - writer.WritePropertyName("currentNodeCommunicationMode"u8); - writer.WriteStringValue(CurrentNodeCommunicationMode.Value.ToString()); - } if (Optional.IsDefined(UpgradePolicy)) { writer.WritePropertyName("upgradePolicy"u8); @@ -311,7 +280,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW VirtualMachineConfiguration virtualMachineConfiguration = default; TimeSpan? resizeTimeout = default; IReadOnlyList resizeErrors = default; - IReadOnlyDictionary resourceTags = default; int? currentDedicatedNodes = default; int? currentLowPriorityNodes = default; int? targetDedicatedNodes = default; @@ -323,7 +291,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW bool? enableInterNodeCommunication = default; NetworkConfiguration networkConfiguration = default; BatchStartTask startTask = default; - IReadOnlyList certificateReferences = default; IReadOnlyList applicationPackageReferences = default; int? taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; @@ -332,8 +299,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW BatchPoolStatistics stats = default; IReadOnlyList mountConfiguration = default; BatchPoolIdentity identity = default; - BatchNodeCommunicationMode? targetNodeCommunicationMode = default; - BatchNodeCommunicationMode? 
currentNodeCommunicationMode = default; UpgradePolicy upgradePolicy = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -458,20 +423,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW resizeErrors = array; continue; } - if (property.NameEquals("resourceTags"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - Dictionary dictionary = new Dictionary(); - foreach (var property0 in property.Value.EnumerateObject()) - { - dictionary.Add(property0.Name, property0.Value.GetString()); - } - resourceTags = dictionary; - continue; - } if (property.NameEquals("currentDedicatedNodes"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -567,20 +518,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } - if (property.NameEquals("certificateReferences"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); - } - certificateReferences = array; - continue; - } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -673,24 +610,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW identity = BatchPoolIdentity.DeserializeBatchPoolIdentity(property.Value, options); continue; } - if (property.NameEquals("targetNodeCommunicationMode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); - continue; - } - if (property.NameEquals("currentNodeCommunicationMode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) 
- { - continue; - } - currentNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); - continue; - } if (property.NameEquals("upgradePolicy"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -721,7 +640,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW virtualMachineConfiguration, resizeTimeout, resizeErrors ?? new ChangeTrackingList(), - resourceTags ?? new ChangeTrackingDictionary(), currentDedicatedNodes, currentLowPriorityNodes, targetDedicatedNodes, @@ -733,7 +651,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW enableInterNodeCommunication, networkConfiguration, startTask, - certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), taskSlotsPerNode, taskSchedulingPolicy, @@ -742,8 +659,6 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW stats, mountConfiguration ?? new ChangeTrackingList(), identity, - targetNodeCommunicationMode, - currentNodeCommunicationMode, upgradePolicy, serializedAdditionalRawData); } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs index 7318a38b3688..cf7d6c35f7fc 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs @@ -49,8 +49,6 @@ public partial class BatchPool internal BatchPool() { ResizeErrors = new ChangeTrackingList(); - ResourceTags = new ChangeTrackingDictionary(); - CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); @@ -59,7 +57,7 @@ internal BatchPool() /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). - /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// The URL of the Pool. /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. @@ -68,11 +66,10 @@ internal BatchPool() /// The time at which the Pool entered its current state. /// Whether the Pool is resizing. /// The time at which the Pool entered its current allocation state. - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). 
/// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. /// The number of dedicated Compute Nodes currently in the Pool. /// The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. /// The desired number of dedicated Compute Nodes in the Pool. @@ -81,28 +78,20 @@ internal BatchPool() /// A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. /// The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. - /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. 
+ /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. 
/// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. - /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - /// The desired node communication mode for the pool. If omitted, the default value is Default. - /// The current state of the pool communication mode. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. - internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchPoolState? state, DateTimeOffset? stateTransitionTime, AllocationState? allocationState, DateTimeOffset? allocationStateTransitionTime, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IReadOnlyList resizeErrors, IReadOnlyDictionary resourceTags, int? currentDedicatedNodes, int? currentLowPriorityNodes, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, AutoScaleRun autoScaleRun, bool? 
enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IReadOnlyList certificateReferences, IReadOnlyList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IReadOnlyList userAccounts, IReadOnlyList metadata, BatchPoolStatistics poolStatistics, IReadOnlyList mountConfiguration, BatchPoolIdentity identity, BatchNodeCommunicationMode? targetNodeCommunicationMode, BatchNodeCommunicationMode? currentNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchPoolState? state, DateTimeOffset? stateTransitionTime, AllocationState? allocationState, DateTimeOffset? allocationStateTransitionTime, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IReadOnlyList resizeErrors, int? currentDedicatedNodes, int? currentLowPriorityNodes, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, AutoScaleRun autoScaleRun, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IReadOnlyList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IReadOnlyList userAccounts, IReadOnlyList metadata, BatchPoolStatistics poolStatistics, IReadOnlyList mountConfiguration, BatchPoolIdentity identity, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; @@ -118,7 +107,6 @@ internal BatchPool(string id, string displayName, Uri uri, ETag? 
eTag, DateTimeO VirtualMachineConfiguration = virtualMachineConfiguration; ResizeTimeout = resizeTimeout; ResizeErrors = resizeErrors; - ResourceTags = resourceTags; CurrentDedicatedNodes = currentDedicatedNodes; CurrentLowPriorityNodes = currentLowPriorityNodes; TargetDedicatedNodes = targetDedicatedNodes; @@ -130,7 +118,6 @@ internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeO EnableInterNodeCommunication = enableInterNodeCommunication; NetworkConfiguration = networkConfiguration; StartTask = startTask; - CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; TaskSlotsPerNode = taskSlotsPerNode; TaskSchedulingPolicy = taskSchedulingPolicy; @@ -139,15 +126,13 @@ internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeO PoolStatistics = poolStatistics; MountConfiguration = mountConfiguration; Identity = identity; - TargetNodeCommunicationMode = targetNodeCommunicationMode; - CurrentNodeCommunicationMode = currentNodeCommunicationMode; UpgradePolicy = upgradePolicy; _serializedAdditionalRawData = serializedAdditionalRawData; } /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). public string Id { get; } - /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. public string DisplayName { get; } /// The URL of the Pool. public Uri Uri { get; } @@ -165,7 +150,7 @@ internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeO public AllocationState? 
AllocationState { get; } /// The time at which the Pool entered its current allocation state. public DateTimeOffset? AllocationStateTransitionTime { get; } - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). public string VmSize { get; } /// The virtual machine configuration for the Pool. This property must be specified. public VirtualMachineConfiguration VirtualMachineConfiguration { get; } @@ -173,8 +158,6 @@ internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeO public TimeSpan? ResizeTimeout { get; } /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. public IReadOnlyList ResizeErrors { get; } - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. - public IReadOnlyDictionary ResourceTags { get; } /// The number of dedicated Compute Nodes currently in the Pool. public int? 
CurrentDedicatedNodes { get; } /// The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. @@ -191,19 +174,12 @@ internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeO public TimeSpan? AutoScaleEvaluationInterval { get; } /// The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. public AutoScaleRun AutoScaleRun { get; } - /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. public bool? EnableInterNodeCommunication { get; } /// The network configuration for the Pool. public NetworkConfiguration NetworkConfiguration { get; } /// A Task specified to run on each Compute Node as it joins the Pool. public BatchStartTask StartTask { get; } - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. 
- /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// - public IReadOnlyList CertificateReferences { get; } /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. public IReadOnlyList ApplicationPackageReferences { get; } /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. @@ -216,14 +192,10 @@ internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeO public IReadOnlyList Metadata { get; } /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. public BatchPoolStatistics PoolStatistics { get; } - /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. public IReadOnlyList MountConfiguration { get; } /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. 
The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. public BatchPoolIdentity Identity { get; } - /// The desired node communication mode for the pool. If omitted, the default value is Default. - public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; } - /// The current state of the pool communication mode. - public BatchNodeCommunicationMode? CurrentNodeCommunicationMode { get; } /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. public UpgradePolicy UpgradePolicy { get; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.Serialization.cs index cf08e0cf4e14..1e307b192617 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.Serialization.cs @@ -53,17 +53,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("resizeTimeout"u8); writer.WriteStringValue(ResizeTimeout.Value, "P"); } - if (Optional.IsCollectionDefined(ResourceTags)) - { - writer.WritePropertyName("resourceTags"u8); - writer.WriteStartObject(); - foreach (var item in ResourceTags) - { - writer.WritePropertyName(item.Key); - writer.WriteStringValue(item.Value); - } - writer.WriteEndObject(); - } if (Optional.IsDefined(TargetDedicatedNodes)) { writer.WritePropertyName("targetDedicatedNodes"u8); @@ -104,16 +93,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } - if (Optional.IsCollectionDefined(CertificateReferences)) - { - 
writer.WritePropertyName("certificateReferences"u8); - writer.WriteStartArray(); - foreach (var item in CertificateReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } if (Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -164,11 +143,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } - if (Optional.IsDefined(TargetNodeCommunicationMode)) - { - writer.WritePropertyName("targetNodeCommunicationMode"u8); - writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); - } if (Optional.IsDefined(UpgradePolicy)) { writer.WritePropertyName("upgradePolicy"u8); @@ -216,7 +190,6 @@ internal static BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonEle string vmSize = default; VirtualMachineConfiguration virtualMachineConfiguration = default; TimeSpan? resizeTimeout = default; - IDictionary resourceTags = default; int? targetDedicatedNodes = default; int? targetLowPriorityNodes = default; bool? enableAutoScale = default; @@ -225,14 +198,12 @@ internal static BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonEle bool? enableInterNodeCommunication = default; NetworkConfiguration networkConfiguration = default; BatchStartTask startTask = default; - IList certificateReferences = default; IList applicationPackageReferences = default; int? taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; IList userAccounts = default; IList metadata = default; IList mountConfiguration = default; - BatchNodeCommunicationMode? 
targetNodeCommunicationMode = default; UpgradePolicy upgradePolicy = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -271,20 +242,6 @@ internal static BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonEle resizeTimeout = property.Value.GetTimeSpan("P"); continue; } - if (property.NameEquals("resourceTags"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - Dictionary dictionary = new Dictionary(); - foreach (var property0 in property.Value.EnumerateObject()) - { - dictionary.Add(property0.Name, property0.Value.GetString()); - } - resourceTags = dictionary; - continue; - } if (property.NameEquals("targetDedicatedNodes"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -353,20 +310,6 @@ internal static BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonEle startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } - if (property.NameEquals("certificateReferences"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); - } - certificateReferences = array; - continue; - } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -441,15 +384,6 @@ internal static BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonEle mountConfiguration = array; continue; } - if (property.NameEquals("targetNodeCommunicationMode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); - continue; - } if (property.NameEquals("upgradePolicy"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -471,7 +405,6 @@ internal static 
BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonEle vmSize, virtualMachineConfiguration, resizeTimeout, - resourceTags ?? new ChangeTrackingDictionary(), targetDedicatedNodes, targetLowPriorityNodes, enableAutoScale, @@ -480,14 +413,12 @@ internal static BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonEle enableInterNodeCommunication, networkConfiguration, startTask, - certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), taskSlotsPerNode, taskSchedulingPolicy, userAccounts ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingList(), mountConfiguration ?? new ChangeTrackingList(), - targetNodeCommunicationMode, upgradePolicy, serializedAdditionalRawData); } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.cs index 986899023db9..fbb654e268b7 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.cs @@ -56,8 +56,6 @@ public BatchPoolCreateOptions(string id, string vmSize) Id = id; VmSize = vmSize; - ResourceTags = new ChangeTrackingDictionary(); - CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); @@ -70,7 +68,6 @@ public BatchPoolCreateOptions(string id, string vmSize) /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). 
Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). /// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. @@ -79,29 +76,21 @@ public BatchPoolCreateOptions(string id, string vmSize) /// Whether the Pool permits direct communication between Compute Nodes. 
Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. 
/// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. - /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. - internal BatchPoolCreateOptions(string id, string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IDictionary resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPoolCreateOptions(string id, string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList applicationPackageReferences, int? 
taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IList userAccounts, IList metadata, IList mountConfiguration, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; VmSize = vmSize; VirtualMachineConfiguration = virtualMachineConfiguration; ResizeTimeout = resizeTimeout; - ResourceTags = resourceTags; TargetDedicatedNodes = targetDedicatedNodes; TargetLowPriorityNodes = targetLowPriorityNodes; EnableAutoScale = enableAutoScale; @@ -110,14 +99,12 @@ internal BatchPoolCreateOptions(string id, string displayName, string vmSize, Vi EnableInterNodeCommunication = enableInterNodeCommunication; NetworkConfiguration = networkConfiguration; StartTask = startTask; - CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; TaskSlotsPerNode = taskSlotsPerNode; TaskSchedulingPolicy = taskSchedulingPolicy; UserAccounts = userAccounts; Metadata = metadata; MountConfiguration = mountConfiguration; - TargetNodeCommunicationMode = targetNodeCommunicationMode; UpgradePolicy = upgradePolicy; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -137,8 +124,6 @@ internal BatchPoolCreateOptions() public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). public TimeSpan? ResizeTimeout { get; set; } - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. 
This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. - public IDictionary ResourceTags { get; } /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. public int? TargetDedicatedNodes { get; set; } /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. @@ -155,13 +140,6 @@ internal BatchPoolCreateOptions() public NetworkConfiguration NetworkConfiguration { get; set; } /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. public BatchStartTask StartTask { get; set; } - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// - public IList CertificateReferences { get; } /// The list of Packages to be installed on each Compute Node in the Pool. 
When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. public IList ApplicationPackageReferences { get; } /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. @@ -174,8 +152,6 @@ internal BatchPoolCreateOptions() public IList Metadata { get; } /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. public IList MountConfiguration { get; } - /// The desired node communication mode for the pool. If omitted, the default value is Default. - public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. public UpgradePolicy UpgradePolicy { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityReference.Serialization.cs new file mode 100644 index 000000000000..46d760bb2e4a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityReference.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolIdentityReference : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolIdentityReference)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(ResourceId)) + { + writer.WritePropertyName("resourceId"u8); + writer.WriteStringValue(ResourceId); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + BatchPoolIdentityReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolIdentityReference)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolIdentityReference(document.RootElement, options); + } + + internal static BatchPoolIdentityReference DeserializeBatchPoolIdentityReference(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ResourceIdentifier resourceId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("resourceId"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resourceId = new ResourceIdentifier(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolIdentityReference(resourceId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); + default: + throw new FormatException($"The model {nameof(BatchPoolIdentityReference)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolIdentityReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchPoolIdentityReference(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolIdentityReference)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolIdentityReference FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchPoolIdentityReference(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityReference.cs new file mode 100644 index 000000000000..57d3a0138bdb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityReference.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + /// The reference of one of the pool identities to encrypt Disk. This identity will be used to access the key vault. + public partial class BatchPoolIdentityReference + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchPoolIdentityReference() + { + } + + /// Initializes a new instance of . + /// The ARM resource id of the user assigned identity. This reference must be included in the pool identities. + /// Keeps track of any properties unknown to the library. + internal BatchPoolIdentityReference(ResourceIdentifier resourceId, IDictionary serializedAdditionalRawData) + { + ResourceId = resourceId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ARM resource id of the user assigned identity. This reference must be included in the pool identities. 
+ public ResourceIdentifier ResourceId { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.Serialization.cs index 147e49e15bb9..f637340b6271 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.Serialization.cs @@ -39,13 +39,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } - writer.WritePropertyName("certificateReferences"u8); - writer.WriteStartArray(); - foreach (var item in CertificateReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); writer.WritePropertyName("applicationPackageReferences"u8); writer.WriteStartArray(); foreach (var item in ApplicationPackageReferences) @@ -60,11 +53,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WriteObjectValue(item, options); } writer.WriteEndArray(); - if (Optional.IsDefined(TargetNodeCommunicationMode)) - { - writer.WritePropertyName("targetNodeCommunicationMode"u8); - writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); - } if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -103,10 +91,8 @@ internal static BatchPoolReplaceOptions DeserializeBatchPoolReplaceOptions(JsonE return null; } BatchStartTask startTask = default; - IList certificateReferences = default; IList applicationPackageReferences = default; IList metadata = default; - BatchNodeCommunicationMode? 
targetNodeCommunicationMode = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -120,16 +106,6 @@ internal static BatchPoolReplaceOptions DeserializeBatchPoolReplaceOptions(JsonE startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } - if (property.NameEquals("certificateReferences"u8)) - { - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); - } - certificateReferences = array; - continue; - } if (property.NameEquals("applicationPackageReferences"u8)) { List array = new List(); @@ -150,28 +126,13 @@ internal static BatchPoolReplaceOptions DeserializeBatchPoolReplaceOptions(JsonE metadata = array; continue; } - if (property.NameEquals("targetNodeCommunicationMode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); - continue; - } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolReplaceOptions( - startTask, - certificateReferences, - applicationPackageReferences, - metadata, - targetNodeCommunicationMode, - serializedAdditionalRawData); + return new BatchPoolReplaceOptions(startTask, applicationPackageReferences, metadata, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.cs index ebf7444dae60..466dfbef6f4b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.cs +++ 
b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.cs @@ -47,49 +47,28 @@ public partial class BatchPoolReplaceOptions private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// - /// This list replaces any existing Certificate references configured on the Pool. - /// If you specify an empty collection, any existing Certificate references are removed from the Pool. - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. 
If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. - /// , or is null. - public BatchPoolReplaceOptions(IEnumerable certificateReferences, IEnumerable applicationPackageReferences, IEnumerable metadata) + /// or is null. + public BatchPoolReplaceOptions(IEnumerable applicationPackageReferences, IEnumerable metadata) { - Argument.AssertNotNull(certificateReferences, nameof(certificateReferences)); Argument.AssertNotNull(applicationPackageReferences, nameof(applicationPackageReferences)); Argument.AssertNotNull(metadata, nameof(metadata)); - CertificateReferences = certificateReferences.ToList(); ApplicationPackageReferences = applicationPackageReferences.ToList(); Metadata = metadata.ToList(); } /// Initializes a new instance of . /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. - /// - /// This list replaces any existing Certificate references configured on the Pool. - /// If you specify an empty collection, any existing Certificate references are removed from the Pool. - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. 
Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. - /// The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. /// Keeps track of any properties unknown to the library. - internal BatchPoolReplaceOptions(BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? 
targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) + internal BatchPoolReplaceOptions(BatchStartTask startTask, IList applicationPackageReferences, IList metadata, IDictionary serializedAdditionalRawData) { StartTask = startTask; - CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; Metadata = metadata; - TargetNodeCommunicationMode = targetNodeCommunicationMode; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -100,20 +79,9 @@ internal BatchPoolReplaceOptions() /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. public BatchStartTask StartTask { get; set; } - /// - /// This list replaces any existing Certificate references configured on the Pool. - /// If you specify an empty collection, any existing Certificate references are removed from the Pool. - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// - public IList CertificateReferences { get; } /// The list of Application Packages to be installed on each Compute Node in the Pool. 
The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. public IList ApplicationPackageReferences { get; } /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. public IList Metadata { get; } - /// The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. - public BatchNodeCommunicationMode? 
TargetNodeCommunicationMode { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs index ab70d15c32e5..085a4a11c4a6 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs @@ -61,11 +61,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("resizeTimeout"u8); writer.WriteStringValue(ResizeTimeout.Value, "P"); } - if (Optional.IsDefined(ResourceTags)) - { - writer.WritePropertyName("resourceTags"u8); - writer.WriteStringValue(ResourceTags); - } if (Optional.IsDefined(TargetDedicatedNodes)) { writer.WritePropertyName("targetDedicatedNodes"u8); @@ -106,16 +101,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } - if (Optional.IsCollectionDefined(CertificateReferences)) - { - writer.WritePropertyName("certificateReferences"u8); - writer.WriteStartArray(); - foreach (var item in CertificateReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } if (Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -156,11 +141,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } - if (Optional.IsDefined(TargetNodeCommunicationMode)) - { - writer.WritePropertyName("targetNodeCommunicationMode"u8); - writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); - } if (Optional.IsDefined(UpgradePolicy)) { writer.WritePropertyName("upgradePolicy"u8); @@ -209,7 +189,6 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle int? 
taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; TimeSpan? resizeTimeout = default; - string resourceTags = default; int? targetDedicatedNodes = default; int? targetLowPriorityNodes = default; bool? enableAutoScale = default; @@ -218,12 +197,10 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle bool? enableInterNodeCommunication = default; NetworkConfiguration networkConfiguration = default; BatchStartTask startTask = default; - IList certificateReferences = default; IList applicationPackageReferences = default; IList userAccounts = default; IList metadata = default; IList mountConfiguration = default; - BatchNodeCommunicationMode? targetNodeCommunicationMode = default; UpgradePolicy upgradePolicy = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -275,11 +252,6 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle resizeTimeout = property.Value.GetTimeSpan("P"); continue; } - if (property.NameEquals("resourceTags"u8)) - { - resourceTags = property.Value.GetString(); - continue; - } if (property.NameEquals("targetDedicatedNodes"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -348,20 +320,6 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } - if (property.NameEquals("certificateReferences"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); - } - certificateReferences = array; - continue; - } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -418,15 +376,6 @@ internal static BatchPoolSpecification 
DeserializeBatchPoolSpecification(JsonEle mountConfiguration = array; continue; } - if (property.NameEquals("targetNodeCommunicationMode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); - continue; - } if (property.NameEquals("upgradePolicy"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -449,7 +398,6 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle taskSlotsPerNode, taskSchedulingPolicy, resizeTimeout, - resourceTags, targetDedicatedNodes, targetLowPriorityNodes, enableAutoScale, @@ -458,12 +406,10 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle enableInterNodeCommunication, networkConfiguration, startTask, - certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), userAccounts ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingList(), mountConfiguration ?? new ChangeTrackingList(), - targetNodeCommunicationMode, upgradePolicy, serializedAdditionalRawData); } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs index 1872af5f9c49..85037de6d4d1 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs @@ -53,7 +53,6 @@ public BatchPoolSpecification(string vmSize) Argument.AssertNotNull(vmSize, nameof(vmSize)); VmSize = vmSize; - CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); @@ -67,7 +66,6 @@ public BatchPoolSpecification(string vmSize) /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. 
The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). - /// The user-specified tags associated with the pool.The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false. @@ -76,19 +74,13 @@ public BatchPoolSpecification(string vmSize) /// Whether the Pool permits direct communication between Compute Nodes. 
Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. - /// Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. 
/// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. - /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. - internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, TimeSpan? resizeTimeout, string resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, TimeSpan? resizeTimeout, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? 
enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList applicationPackageReferences, IList userAccounts, IList metadata, IList mountConfiguration, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { DisplayName = displayName; VmSize = vmSize; @@ -96,7 +88,6 @@ internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachin TaskSlotsPerNode = taskSlotsPerNode; TaskSchedulingPolicy = taskSchedulingPolicy; ResizeTimeout = resizeTimeout; - ResourceTags = resourceTags; TargetDedicatedNodes = targetDedicatedNodes; TargetLowPriorityNodes = targetLowPriorityNodes; EnableAutoScale = enableAutoScale; @@ -105,12 +96,10 @@ internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachin EnableInterNodeCommunication = enableInterNodeCommunication; NetworkConfiguration = networkConfiguration; StartTask = startTask; - CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; UserAccounts = userAccounts; Metadata = metadata; MountConfiguration = mountConfiguration; - TargetNodeCommunicationMode = targetNodeCommunicationMode; UpgradePolicy = upgradePolicy; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -132,8 +121,6 @@ internal BatchPoolSpecification() public BatchTaskSchedulingPolicy TaskSchedulingPolicy { get; set; } /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). public TimeSpan? ResizeTimeout { get; set; } - /// The user-specified tags associated with the pool.The user-defined tags to be associated with the Azure Batch Pool. 
When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. - public string ResourceTags { get; set; } /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. public int? TargetDedicatedNodes { get; set; } /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. @@ -150,12 +137,6 @@ internal BatchPoolSpecification() public NetworkConfiguration NetworkConfiguration { get; set; } /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. public BatchStartTask StartTask { get; set; } - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. - /// Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. 
- /// - public IList CertificateReferences { get; } /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. public IList ApplicationPackageReferences { get; } /// The list of user Accounts to be created on each Compute Node in the Pool. @@ -164,8 +145,6 @@ internal BatchPoolSpecification() public IList Metadata { get; } /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. public IList MountConfiguration { get; } - /// The desired node communication mode for the pool. If omitted, the default value is Default. - public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. 
public UpgradePolicy UpgradePolicy { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.Serialization.cs index b950c54e7729..e5842605ba69 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.Serialization.cs @@ -54,16 +54,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } - if (Optional.IsCollectionDefined(CertificateReferences)) - { - writer.WritePropertyName("certificateReferences"u8); - writer.WriteStartArray(); - foreach (var item in CertificateReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } if (Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -89,11 +79,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("virtualMachineConfiguration"u8); writer.WriteObjectValue(VirtualMachineConfiguration, options); } - if (Optional.IsDefined(TargetNodeCommunicationMode)) - { - writer.WritePropertyName("targetNodeCommunicationMode"u8); - writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); - } if (Optional.IsDefined(TaskSlotsPerNode)) { writer.WritePropertyName("taskSlotsPerNode"u8); @@ -109,17 +94,6 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("networkConfiguration"u8); writer.WriteObjectValue(NetworkConfiguration, options); } - if (Optional.IsCollectionDefined(ResourceTags)) - { - writer.WritePropertyName("resourceTags"u8); - writer.WriteStartObject(); - foreach (var item in ResourceTags) - { - writer.WritePropertyName(item.Key); - 
writer.WriteStringValue(item.Value); - } - writer.WriteEndObject(); - } if (Optional.IsCollectionDefined(UserAccounts)) { writer.WritePropertyName("userAccounts"u8); @@ -186,15 +160,12 @@ internal static BatchPoolUpdateOptions DeserializeBatchPoolUpdateOptions(JsonEle string vmSize = default; bool? enableInterNodeCommunication = default; BatchStartTask startTask = default; - IList certificateReferences = default; IList applicationPackageReferences = default; IList metadata = default; VirtualMachineConfiguration virtualMachineConfiguration = default; - BatchNodeCommunicationMode? targetNodeCommunicationMode = default; int? taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; NetworkConfiguration networkConfiguration = default; - IDictionary resourceTags = default; IList userAccounts = default; IList mountConfiguration = default; UpgradePolicy upgradePolicy = default; @@ -230,20 +201,6 @@ internal static BatchPoolUpdateOptions DeserializeBatchPoolUpdateOptions(JsonEle startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } - if (property.NameEquals("certificateReferences"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); - } - certificateReferences = array; - continue; - } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -281,15 +238,6 @@ internal static BatchPoolUpdateOptions DeserializeBatchPoolUpdateOptions(JsonEle virtualMachineConfiguration = VirtualMachineConfiguration.DeserializeVirtualMachineConfiguration(property.Value, options); continue; } - if (property.NameEquals("targetNodeCommunicationMode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - targetNodeCommunicationMode = new 
BatchNodeCommunicationMode(property.Value.GetString()); - continue; - } if (property.NameEquals("taskSlotsPerNode"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -317,20 +265,6 @@ internal static BatchPoolUpdateOptions DeserializeBatchPoolUpdateOptions(JsonEle networkConfiguration = NetworkConfiguration.DeserializeNetworkConfiguration(property.Value, options); continue; } - if (property.NameEquals("resourceTags"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - Dictionary dictionary = new Dictionary(); - foreach (var property0 in property.Value.EnumerateObject()) - { - dictionary.Add(property0.Name, property0.Value.GetString()); - } - resourceTags = dictionary; - continue; - } if (property.NameEquals("userAccounts"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -379,15 +313,12 @@ internal static BatchPoolUpdateOptions DeserializeBatchPoolUpdateOptions(JsonEle vmSize, enableInterNodeCommunication, startTask, - certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingList(), virtualMachineConfiguration, - targetNodeCommunicationMode, taskSlotsPerNode, taskSchedulingPolicy, networkConfiguration, - resourceTags ?? new ChangeTrackingDictionary(), userAccounts ?? new ChangeTrackingList(), mountConfiguration ?? new ChangeTrackingList(), upgradePolicy, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.cs index 1d06df687e13..9f48027e3cf9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateOptions.cs @@ -48,10 +48,8 @@ public partial class BatchPoolUpdateOptions /// Initializes a new instance of . 
public BatchPoolUpdateOptions() { - CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); - ResourceTags = new ChangeTrackingDictionary(); UserAccounts = new ChangeTrackingList(); MountConfiguration = new ChangeTrackingList(); } @@ -61,41 +59,28 @@ public BatchPoolUpdateOptions() /// The size of virtual machines in the Pool. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).<br /><br />This field can be updated only when the pool is empty. /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.<br /><br />This field can be updated only when the pool is empty. /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. - /// - /// If this element is present, it replaces any existing Certificate references configured on the Pool. - /// If omitted, any existing Certificate references are left unchanged. - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. 
- /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. /// The virtual machine configuration for the Pool. This property must be specified.<br /><br />This field can be updated only when the pool is empty. - /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.<br /><br />This field can be updated only when the pool is empty. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.<br /><br />This field can be updated only when the pool is empty. 
/// The network configuration for the Pool. This field can be updated only when the pool is empty. - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.<br /><br />This field can be updated only when the pool is empty. /// The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.<br /><br />This field can be updated only when the pool is empty. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.<br /><br />This field can be updated only when the pool is empty. /// Keeps track of any properties unknown to the library. - internal BatchPoolUpdateOptions(string displayName, string vmSize, bool? enableInterNodeCommunication, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList metadata, VirtualMachineConfiguration virtualMachineConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, NetworkConfiguration networkConfiguration, IDictionary resourceTags, IList userAccounts, IList mountConfiguration, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPoolUpdateOptions(string displayName, string vmSize, bool? enableInterNodeCommunication, BatchStartTask startTask, IList applicationPackageReferences, IList metadata, VirtualMachineConfiguration virtualMachineConfiguration, int? 
taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, NetworkConfiguration networkConfiguration, IList userAccounts, IList mountConfiguration, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { DisplayName = displayName; VmSize = vmSize; EnableInterNodeCommunication = enableInterNodeCommunication; StartTask = startTask; - CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; Metadata = metadata; VirtualMachineConfiguration = virtualMachineConfiguration; - TargetNodeCommunicationMode = targetNodeCommunicationMode; TaskSlotsPerNode = taskSlotsPerNode; TaskSchedulingPolicy = taskSchedulingPolicy; NetworkConfiguration = networkConfiguration; - ResourceTags = resourceTags; UserAccounts = userAccounts; MountConfiguration = mountConfiguration; UpgradePolicy = upgradePolicy; @@ -110,31 +95,18 @@ internal BatchPoolUpdateOptions(string displayName, string vmSize, bool? enableI public bool? EnableInterNodeCommunication { get; set; } /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. public BatchStartTask StartTask { get; set; } - /// - /// If this element is present, it replaces any existing Certificate references configured on the Pool. - /// If omitted, any existing Certificate references are left unchanged. - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. 
- /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// - public IList CertificateReferences { get; } /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. public IList ApplicationPackageReferences { get; } /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. public IList Metadata { get; } /// The virtual machine configuration for the Pool. This property must be specified.<br /><br />This field can be updated only when the pool is empty. public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } - /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. - public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. 
The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.<br /><br />This field can be updated only when the pool is empty. public int? TaskSlotsPerNode { get; set; } /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.<br /><br />This field can be updated only when the pool is empty. public BatchTaskSchedulingPolicy TaskSchedulingPolicy { get; set; } /// The network configuration for the Pool. This field can be updated only when the pool is empty. public NetworkConfiguration NetworkConfiguration { get; set; } - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.<br /><br />This field can be updated only when the pool is empty. - public IDictionary ResourceTags { get; } /// The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. public IList UserAccounts { get; } /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.<br /><br />This field can be updated only when the pool is empty. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.Serialization.cs index 0e727455788c..8ddc79d4fd85 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.Serialization.cs @@ -39,6 +39,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("provision"u8); writer.WriteStringValue(IpAddressProvisioningType.Value.ToString()); } + if (Optional.IsCollectionDefined(IpFamilies)) + { + writer.WritePropertyName("ipFamilies"u8); + writer.WriteStartArray(); + foreach (var item in IpFamilies) + { + writer.WriteStringValue(item.ToString()); + } + writer.WriteEndArray(); + } if (Optional.IsCollectionDefined(IpAddressIds)) { writer.WritePropertyName("ipAddressIds"u8); @@ -54,6 +64,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } + if (Optional.IsCollectionDefined(IpTags)) + { + writer.WritePropertyName("ipTags"u8); + writer.WriteStartArray(); + foreach (var item in IpTags) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -92,7 +112,9 @@ internal static BatchPublicIpAddressConfiguration DeserializeBatchPublicIpAddres return null; } IpAddressProvisioningType? 
provision = default; + IList ipFamilies = default; IList ipAddressIds = default; + IList ipTags = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -106,6 +128,20 @@ internal static BatchPublicIpAddressConfiguration DeserializeBatchPublicIpAddres provision = new IpAddressProvisioningType(property.Value.GetString()); continue; } + if (property.NameEquals("ipFamilies"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(new IPFamily(item.GetString())); + } + ipFamilies = array; + continue; + } if (property.NameEquals("ipAddressIds"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -127,13 +163,27 @@ internal static BatchPublicIpAddressConfiguration DeserializeBatchPublicIpAddres ipAddressIds = array; continue; } + if (property.NameEquals("ipTags"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(IPTag.DeserializeIPTag(item, options)); + } + ipTags = array; + continue; + } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPublicIpAddressConfiguration(provision, ipAddressIds ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new BatchPublicIpAddressConfiguration(provision, ipFamilies ?? new ChangeTrackingList(), ipAddressIds ?? new ChangeTrackingList(), ipTags ?? 
new ChangeTrackingList(), serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.cs index 2d49cadcfb35..7c398e2e1776 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.cs @@ -49,23 +49,33 @@ public partial class BatchPublicIpAddressConfiguration /// Initializes a new instance of . public BatchPublicIpAddressConfiguration() { + IpFamilies = new ChangeTrackingList(); IpAddressIds = new ChangeTrackingList(); + IpTags = new ChangeTrackingList(); } /// Initializes a new instance of . /// The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + /// The IP families used to specify IP versions available to the pool. IP families are used to determine single-stack or dual-stack pools. For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. /// The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. + /// A list of IP tags associated with the public IP addresses of the Pool. IP tags are used to categorize and filter public IP addresses for billing and management purposes. /// Keeps track of any properties unknown to the library. - internal BatchPublicIpAddressConfiguration(IpAddressProvisioningType? 
ipAddressProvisioningType, IList ipAddressIds, IDictionary serializedAdditionalRawData) + internal BatchPublicIpAddressConfiguration(IpAddressProvisioningType? ipAddressProvisioningType, IList ipFamilies, IList ipAddressIds, IList ipTags, IDictionary serializedAdditionalRawData) { IpAddressProvisioningType = ipAddressProvisioningType; + IpFamilies = ipFamilies; IpAddressIds = ipAddressIds; + IpTags = ipTags; _serializedAdditionalRawData = serializedAdditionalRawData; } /// The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. public IpAddressProvisioningType? IpAddressProvisioningType { get; set; } + /// The IP families used to specify IP versions available to the pool. IP families are used to determine single-stack or dual-stack pools. For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. + public IList IpFamilies { get; } /// The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. public IList IpAddressIds { get; } + /// A list of IP tags associated with the public IP addresses of the Pool. IP tags are used to categorize and filter public IP addresses for billing and management purposes. 
+ public IList IpTags { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.Serialization.cs index 91380c1a39a8..cdf3ced0b030 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.Serialization.cs @@ -34,6 +34,11 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit throw new FormatException($"The model {nameof(BatchTaskSchedulingPolicy)} does not support writing '{format}' format."); } + if (Optional.IsDefined(JobDefaultOrder)) + { + writer.WritePropertyName("jobDefaultOrder"u8); + writer.WriteStringValue(JobDefaultOrder.Value.ToString()); + } writer.WritePropertyName("nodeFillType"u8); writer.WriteStringValue(NodeFillType.ToString()); if (options.Format != "W" && _serializedAdditionalRawData != null) @@ -73,11 +78,21 @@ internal static BatchTaskSchedulingPolicy DeserializeBatchTaskSchedulingPolicy(J { return null; } + BatchJobDefaultOrder? 
jobDefaultOrder = default; BatchNodeFillType nodeFillType = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("jobDefaultOrder"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobDefaultOrder = new BatchJobDefaultOrder(property.Value.GetString()); + continue; + } if (property.NameEquals("nodeFillType"u8)) { nodeFillType = new BatchNodeFillType(property.Value.GetString()); @@ -89,7 +104,7 @@ internal static BatchTaskSchedulingPolicy DeserializeBatchTaskSchedulingPolicy(J } } serializedAdditionalRawData = rawDataDictionary; - return new BatchTaskSchedulingPolicy(nodeFillType, serializedAdditionalRawData); + return new BatchTaskSchedulingPolicy(jobDefaultOrder, nodeFillType, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.cs index 40078de0754f..95d470733350 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.cs @@ -53,10 +53,12 @@ public BatchTaskSchedulingPolicy(BatchNodeFillType nodeFillType) } /// Initializes a new instance of . + /// The order for scheduling tasks from different jobs with the same priority. If not specified, the default is none. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// Keeps track of any properties unknown to the library. - internal BatchTaskSchedulingPolicy(BatchNodeFillType nodeFillType, IDictionary serializedAdditionalRawData) + internal BatchTaskSchedulingPolicy(BatchJobDefaultOrder? 
jobDefaultOrder, BatchNodeFillType nodeFillType, IDictionary serializedAdditionalRawData) { + JobDefaultOrder = jobDefaultOrder; NodeFillType = nodeFillType; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -66,6 +68,8 @@ internal BatchTaskSchedulingPolicy() { } + /// The order for scheduling tasks from different jobs with the same priority. If not specified, the default is none. + public BatchJobDefaultOrder? JobDefaultOrder { get; set; } /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. public BatchNodeFillType NodeFillType { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs index 624bc8ad9d99..ff6342de46a0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs @@ -82,7 +82,6 @@ public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). /// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. 
If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. @@ -91,25 +90,16 @@ public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. 
- /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// Mount storage using specified file system for the entire lifetime of the pool. 
Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. - /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// A new instance for mocking. - public static BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, string displayName = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IDictionary resourceTags = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, IEnumerable mountConfiguration = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + public static BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, string displayName = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable applicationPackageReferences = null, int? 
taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, IEnumerable mountConfiguration = null, UpgradePolicy upgradePolicy = null) { - resourceTags ??= new Dictionary(); - certificateReferences ??= new List(); applicationPackageReferences ??= new List(); userAccounts ??= new List(); metadata ??= new List(); @@ -121,7 +111,6 @@ public static BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, st vmSize, virtualMachineConfiguration, resizeTimeout, - resourceTags, targetDedicatedNodes, targetLowPriorityNodes, enableAutoScale, @@ -130,14 +119,12 @@ public static BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, st enableInterNodeCommunication, networkConfiguration, startTask, - certificateReferences?.ToList(), applicationPackageReferences?.ToList(), taskSlotsPerNode, taskSchedulingPolicy, userAccounts?.ToList(), metadata?.ToList(), mountConfiguration?.ToList(), - targetNodeCommunicationMode, upgradePolicy, serializedAdditionalRawData: null); } @@ -168,7 +155,7 @@ public static BatchVmImageReference BatchVmImageReference(string publisher = nul /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). - /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// The URL of the Pool. /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. 
In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. @@ -177,11 +164,10 @@ public static BatchVmImageReference BatchVmImageReference(string publisher = nul /// The time at which the Pool entered its current state. /// Whether the Pool is resizing. /// The time at which the Pool entered its current allocation state. - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. 
When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. /// The number of dedicated Compute Nodes currently in the Pool. /// The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. /// The desired number of dedicated Compute Nodes in the Pool. @@ -190,32 +176,22 @@ public static BatchVmImageReference BatchVmImageReference(string publisher = nul /// A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. /// The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. - /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. - /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. 
- /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. - /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + /// Mount storage using specified file system for the entire lifetime of the pool. 
Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - /// The desired node communication mode for the pool. If omitted, the default value is Default. - /// The current state of the pool communication mode. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// A new instance for mocking. - public static BatchPool BatchPool(string id = null, string displayName = null, Uri uri = null, ETag? eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchPoolState? state = null, DateTimeOffset? stateTransitionTime = null, AllocationState? allocationState = null, DateTimeOffset? allocationStateTransitionTime = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IEnumerable resizeErrors = null, IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = null, int? currentLowPriorityNodes = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? 
taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, BatchPoolStatistics poolStatistics = null, IEnumerable mountConfiguration = null, BatchPoolIdentity identity = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, BatchNodeCommunicationMode? currentNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + public static BatchPool BatchPool(string id = null, string displayName = null, Uri uri = null, ETag? eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchPoolState? state = null, DateTimeOffset? stateTransitionTime = null, AllocationState? allocationState = null, DateTimeOffset? allocationStateTransitionTime = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IEnumerable resizeErrors = null, int? currentDedicatedNodes = null, int? currentLowPriorityNodes = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable applicationPackageReferences = null, int? 
taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, BatchPoolStatistics poolStatistics = null, IEnumerable mountConfiguration = null, BatchPoolIdentity identity = null, UpgradePolicy upgradePolicy = null) { resizeErrors ??= new List(); - resourceTags ??= new Dictionary(); - certificateReferences ??= new List(); applicationPackageReferences ??= new List(); userAccounts ??= new List(); metadata ??= new List(); @@ -236,7 +212,6 @@ public static BatchPool BatchPool(string id = null, string displayName = null, U virtualMachineConfiguration, resizeTimeout, resizeErrors?.ToList(), - resourceTags, currentDedicatedNodes, currentLowPriorityNodes, targetDedicatedNodes, @@ -248,7 +223,6 @@ public static BatchPool BatchPool(string id = null, string displayName = null, U enableInterNodeCommunication, networkConfiguration, startTask, - certificateReferences?.ToList(), applicationPackageReferences?.ToList(), taskSlotsPerNode, taskSchedulingPolicy, @@ -257,8 +231,6 @@ public static BatchPool BatchPool(string id = null, string displayName = null, U poolStatistics, mountConfiguration?.ToList(), identity, - targetNodeCommunicationMode, - currentNodeCommunicationMode, upgradePolicy, serializedAdditionalRawData: null); } @@ -476,8 +448,8 @@ public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = /// The previous state of the Job. This property is not set if the Job is in its initial Active state. /// The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. - /// Whether Tasks in this job can be preempted by other high priority jobs. 
If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. /// The execution constraints for the Job. /// Details of a Job Manager Task to be launched when the Job is started. /// The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. @@ -486,7 +458,7 @@ public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = /// The Pool settings associated with the Job. /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. 
/// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// The execution information for the Job. /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. @@ -598,8 +570,8 @@ public static BatchJobStatistics BatchJobStatistics(Uri uri = null, DateTimeOffs /// The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// Whether Tasks in the Job can define dependencies on each other. The default is false. /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. 
- /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. /// The execution constraints for the Job. /// Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and perform whatever actions are required for the Job.) 
For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. /// The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. @@ -608,7 +580,7 @@ public static BatchJobStatistics BatchJobStatistics(Uri uri = null, DateTimeOffs /// The Pool on which the Batch service runs the Job's Tasks. /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. - /// The network configuration for the Job. + /// (This property is not available by default. Please contact support for more information) The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// A new instance for mocking. public static BatchJobCreateOptions BatchJobCreateOptions(string id = null, string displayName = null, bool? usesTaskDependencies = null, int? priority = null, bool? allowTaskPreemption = null, int? 
maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, BatchAllTasksCompleteMode? allTasksCompleteMode = null, BatchTaskFailureMode? taskFailureMode = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null) @@ -778,50 +750,6 @@ public static BatchTaskSlotCounts BatchTaskSlotCounts(int active = default, int serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). - /// The algorithm used to derive the thumbprint. This must be sha1. - /// The URL of the Certificate. - /// The state of the Certificate. - /// The time at which the Certificate entered its current state. - /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. - /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. - /// The public part of the Certificate as a base-64 encoded .cer file. - /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. - /// The base64-encoded contents of the Certificate. The maximum size is 10KB. - /// The format of the Certificate data. - /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. - /// A new instance for mocking. - public static BatchCertificate BatchCertificate(string thumbprint = null, string thumbprintAlgorithm = null, Uri uri = null, BatchCertificateState? state = null, DateTimeOffset? stateTransitionTime = null, BatchCertificateState? 
previousState = null, DateTimeOffset? previousStateTransitionTime = null, string publicData = null, BatchCertificateDeleteError deleteCertificateError = null, BinaryData data = null, BatchCertificateFormat? certificateFormat = null, string password = null) - { - return new BatchCertificate( - thumbprint, - thumbprintAlgorithm, - uri, - state, - stateTransitionTime, - previousState, - previousStateTransitionTime, - publicData, - deleteCertificateError, - data, - certificateFormat, - password, - serializedAdditionalRawData: null); - } - - /// Initializes a new instance of . - /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. - /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. - /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. - /// A new instance for mocking. - public static BatchCertificateDeleteError BatchCertificateDeleteError(string code = null, string message = null, IEnumerable values = null) - { - values ??= new List(); - - return new BatchCertificateDeleteError(code, message, values?.ToList(), serializedAdditionalRawData: null); - } - /// Initializes a new instance of . /// A string that uniquely identifies the schedule within the Account. /// The display name for the schedule. @@ -1245,6 +1173,7 @@ public static BatchNodeUserCreateOptions BatchNodeUserCreateOptions(string name /// The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. /// The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. 
It is not updated when the Compute Node is service healed or preempted. /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. + /// The IPv6 address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. This property will not be present if the Pool is not configured for IPv6. /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. @@ -1254,22 +1183,15 @@ public static BatchNodeUserCreateOptions BatchNodeUserCreateOptions(string name /// A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. /// The Task specified to run on the Compute Node as it joins the Pool. /// Runtime information about the execution of the StartTask on the Compute Node. 
- /// - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// /// The list of errors that are currently being encountered by the Compute Node. /// Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. /// The endpoint configuration for the Compute Node. /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. /// Info about the current state of the virtual machine. /// A new instance for mocking. - public static BatchNode BatchNode(string id = null, Uri uri = null, BatchNodeState? state = null, SchedulingState? schedulingState = null, DateTimeOffset? stateTransitionTime = null, DateTimeOffset? lastBootTime = null, DateTimeOffset? allocationTime = null, IPAddress ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = null, int? runningTasksCount = null, int? runningTaskSlotsCount = null, int? totalTasksSucceeded = null, IEnumerable recentTasks = null, BatchStartTask startTask = null, BatchStartTaskInfo startTaskInfo = null, IEnumerable certificateReferences = null, IEnumerable errors = null, bool? 
isDedicated = null, BatchNodeEndpointConfiguration endpointConfiguration = null, BatchNodeAgentInfo nodeAgentInfo = null, VirtualMachineInfo virtualMachineInfo = null) + public static BatchNode BatchNode(string id = null, Uri uri = null, BatchNodeState? state = null, SchedulingState? schedulingState = null, DateTimeOffset? stateTransitionTime = null, DateTimeOffset? lastBootTime = null, DateTimeOffset? allocationTime = null, IPAddress ipAddress = null, IPAddress ipv6Address = null, string affinityId = null, string vmSize = null, int? totalTasksRun = null, int? runningTasksCount = null, int? runningTaskSlotsCount = null, int? totalTasksSucceeded = null, IEnumerable recentTasks = null, BatchStartTask startTask = null, BatchStartTaskInfo startTaskInfo = null, IEnumerable errors = null, bool? isDedicated = null, BatchNodeEndpointConfiguration endpointConfiguration = null, BatchNodeAgentInfo nodeAgentInfo = null, VirtualMachineInfo virtualMachineInfo = null) { recentTasks ??= new List(); - certificateReferences ??= new List(); errors ??= new List(); return new BatchNode( @@ -1281,6 +1203,7 @@ public static BatchNode BatchNode(string id = null, Uri uri = null, BatchNodeSta lastBootTime, allocationTime, ipAddress, + ipv6Address, affinityId, vmSize, totalTasksRun, @@ -1290,7 +1213,6 @@ public static BatchNode BatchNode(string id = null, Uri uri = null, BatchNodeSta recentTasks?.ToList(), startTask, startTaskInfo, - certificateReferences?.ToList(), errors?.ToList(), isDedicated, endpointConfiguration, @@ -1406,12 +1328,14 @@ public static VirtualMachineInfo VirtualMachineInfo(BatchVmImageReference imageR } /// Initializes a new instance of . + /// The IPv6 address used for remote login to the Compute Node. + /// The port used for remote login to the Compute Node. /// The IP address used for remote login to the Compute Node. /// The port used for remote login to the Compute Node. /// A new instance for mocking. 
- public static BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(IPAddress remoteLoginIpAddress = null, int remoteLoginPort = default) + public static BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(IPAddress ipv6RemoteLoginIpAddress = null, int? ipv6RemoteLoginPort = null, IPAddress remoteLoginIpAddress = null, int remoteLoginPort = default) { - return new BatchNodeRemoteLoginSettings(remoteLoginIpAddress, remoteLoginPort, serializedAdditionalRawData: null); + return new BatchNodeRemoteLoginSettings(ipv6RemoteLoginIpAddress, ipv6RemoteLoginPort, remoteLoginIpAddress, remoteLoginPort, serializedAdditionalRawData: null); } /// Initializes a new instance of . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.Serialization.cs index 38da1281759e..b857ebeef89b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.Serialization.cs @@ -43,6 +43,11 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("diskSizeGB"u8); writer.WriteNumberValue(DiskSizeGb); + if (Optional.IsDefined(ManagedDisk)) + { + writer.WritePropertyName("managedDisk"u8); + writer.WriteObjectValue(ManagedDisk, options); + } if (Optional.IsDefined(StorageAccountType)) { writer.WritePropertyName("storageAccountType"u8); @@ -88,6 +93,7 @@ internal static DataDisk DeserializeDataDisk(JsonElement element, ModelReaderWri int lun = default; CachingType? caching = default; int diskSizeGB = default; + ManagedDisk managedDisk = default; StorageAccountType? 
storageAccountType = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -112,6 +118,15 @@ internal static DataDisk DeserializeDataDisk(JsonElement element, ModelReaderWri diskSizeGB = property.Value.GetInt32(); continue; } + if (property.NameEquals("managedDisk"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + managedDisk = ManagedDisk.DeserializeManagedDisk(property.Value, options); + continue; + } if (property.NameEquals("storageAccountType"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -127,7 +142,13 @@ internal static DataDisk DeserializeDataDisk(JsonElement element, ModelReaderWri } } serializedAdditionalRawData = rawDataDictionary; - return new DataDisk(lun, caching, diskSizeGB, storageAccountType, serializedAdditionalRawData); + return new DataDisk( + lun, + caching, + diskSizeGB, + managedDisk, + storageAccountType, + serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.cs index f18f0f94302e..f75616989fb3 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.cs @@ -62,13 +62,15 @@ public DataDisk(int logicalUnitNumber, int diskSizeGb) /// The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be between 0 and 63, inclusive. /// The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. /// The initial disk size in gigabytes. + /// The managed disk parameters. 
/// The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". /// Keeps track of any properties unknown to the library. - internal DataDisk(int logicalUnitNumber, CachingType? caching, int diskSizeGb, StorageAccountType? storageAccountType, IDictionary serializedAdditionalRawData) + internal DataDisk(int logicalUnitNumber, CachingType? caching, int diskSizeGb, ManagedDisk managedDisk, StorageAccountType? storageAccountType, IDictionary serializedAdditionalRawData) { LogicalUnitNumber = logicalUnitNumber; Caching = caching; DiskSizeGb = diskSizeGb; + ManagedDisk = managedDisk; StorageAccountType = storageAccountType; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -84,6 +86,8 @@ internal DataDisk() public CachingType? Caching { get; set; } /// The initial disk size in gigabytes. public int DiskSizeGb { get; set; } + /// The managed disk parameters. + public ManagedDisk ManagedDisk { get; set; } /// The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". public StorageAccountType? StorageAccountType { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskCustomerManagedKey.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskCustomerManagedKey.Serialization.cs new file mode 100644 index 000000000000..4b1b2105be44 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskCustomerManagedKey.Serialization.cs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class DiskCustomerManagedKey : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DiskCustomerManagedKey)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(IdentityReference)) + { + writer.WritePropertyName("identityReference"u8); + writer.WriteObjectValue(IdentityReference, options); + } + if (Optional.IsDefined(KeyUrl)) + { + writer.WritePropertyName("keyUrl"u8); + writer.WriteStringValue(KeyUrl); + } + if (Optional.IsDefined(RotationToLatestKeyVersionEnabled)) + { + writer.WritePropertyName("rotationToLatestKeyVersionEnabled"u8); + writer.WriteBooleanValue(RotationToLatestKeyVersionEnabled.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + DiskCustomerManagedKey 
IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DiskCustomerManagedKey)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDiskCustomerManagedKey(document.RootElement, options); + } + + internal static DiskCustomerManagedKey DeserializeDiskCustomerManagedKey(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchPoolIdentityReference identityReference = default; + string keyUrl = default; + bool? rotationToLatestKeyVersionEnabled = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("identityReference"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + identityReference = BatchPoolIdentityReference.DeserializeBatchPoolIdentityReference(property.Value, options); + continue; + } + if (property.NameEquals("keyUrl"u8)) + { + keyUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("rotationToLatestKeyVersionEnabled"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + rotationToLatestKeyVersionEnabled = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new DiskCustomerManagedKey(identityReference, keyUrl, rotationToLatestKeyVersionEnabled, serializedAdditionalRawData); + } + + BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); + default: + throw new FormatException($"The model {nameof(DiskCustomerManagedKey)} does not support writing '{options.Format}' format."); + } + } + + DiskCustomerManagedKey IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeDiskCustomerManagedKey(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DiskCustomerManagedKey)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static DiskCustomerManagedKey FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeDiskCustomerManagedKey(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskCustomerManagedKey.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskCustomerManagedKey.cs new file mode 100644 index 000000000000..bdfd7e79c7b4 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskCustomerManagedKey.cs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The Customer Managed Key reference to encrypt the Disk. + public partial class DiskCustomerManagedKey + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public DiskCustomerManagedKey() + { + } + + /// Initializes a new instance of . + /// The reference of one of the pool identities to encrypt Disk. This identity will be used to access the KeyVault. + /// Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. 
+ /// Set this flag to true to enable auto-updating of the Disk Encryption to the latest key version. Default is false. + /// Keeps track of any properties unknown to the library. + internal DiskCustomerManagedKey(BatchPoolIdentityReference identityReference, string keyUrl, bool? rotationToLatestKeyVersionEnabled, IDictionary serializedAdditionalRawData) + { + IdentityReference = identityReference; + KeyUrl = keyUrl; + RotationToLatestKeyVersionEnabled = rotationToLatestKeyVersionEnabled; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The reference of one of the pool identities to encrypt Disk. This identity will be used to access the KeyVault. + public BatchPoolIdentityReference IdentityReference { get; set; } + /// Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. + public string KeyUrl { get; set; } + /// Set this flag to true to enable auto-updating of the Disk Encryption to the latest key version. Default is false. + public bool? 
RotationToLatestKeyVersionEnabled { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.Serialization.cs index a8d62d5ff872..ee740ad11c36 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.Serialization.cs @@ -34,6 +34,11 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit throw new FormatException($"The model {nameof(DiskEncryptionConfiguration)} does not support writing '{format}' format."); } + if (Optional.IsDefined(CustomerManagedKey)) + { + writer.WritePropertyName("customerManagedKey"u8); + writer.WriteObjectValue(CustomerManagedKey, options); + } if (Optional.IsCollectionDefined(Targets)) { writer.WritePropertyName("targets"u8); @@ -81,11 +86,21 @@ internal static DiskEncryptionConfiguration DeserializeDiskEncryptionConfigurati { return null; } + DiskCustomerManagedKey customerManagedKey = default; IList targets = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("customerManagedKey"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + customerManagedKey = DiskCustomerManagedKey.DeserializeDiskCustomerManagedKey(property.Value, options); + continue; + } if (property.NameEquals("targets"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -106,7 +121,7 @@ internal static DiskEncryptionConfiguration DeserializeDiskEncryptionConfigurati } } serializedAdditionalRawData = rawDataDictionary; - return new DiskEncryptionConfiguration(targets ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new DiskEncryptionConfiguration(customerManagedKey, targets ?? 
new ChangeTrackingList(), serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs index 2ad49f0b0d74..fd080ec1a262 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs @@ -56,14 +56,18 @@ public DiskEncryptionConfiguration() } /// Initializes a new instance of . + /// The Customer Managed Key reference to encrypt the OS Disk. Customer Managed Key will encrypt OS Disk by EncryptionAtRest, and by default we will encrypt the data disk as well. It can be used only when the pool is configured with an identity and OsDisk is set as one of the targets of DiskEncryption. /// The list of disk targets Batch Service will encrypt on the compute node. The list of disk targets Batch Service will encrypt on the compute node. /// Keeps track of any properties unknown to the library. - internal DiskEncryptionConfiguration(IList targets, IDictionary serializedAdditionalRawData) + internal DiskEncryptionConfiguration(DiskCustomerManagedKey customerManagedKey, IList targets, IDictionary serializedAdditionalRawData) { + CustomerManagedKey = customerManagedKey; Targets = targets; _serializedAdditionalRawData = serializedAdditionalRawData; } + /// The Customer Managed Key reference to encrypt the OS Disk. Customer Managed Key will encrypt OS Disk by EncryptionAtRest, and by default we will encrypt the data disk as well. It can be used only when the pool is configured with an identity and OsDisk is set as one of the targets of DiskEncryption. + public DiskCustomerManagedKey CustomerManagedKey { get; set; } /// The list of disk targets Batch Service will encrypt on the compute node. The list of disk targets Batch Service will encrypt on the compute node. 
public IList Targets { get; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionSetParameters.Serialization.cs similarity index 57% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionSetParameters.Serialization.cs index bff448acedbe..cf259a02e015 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionSetParameters.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchCertificateDeleteError : IUtf8JsonSerializable, IJsonModel + public partial class DiskEncryptionSetParameters : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,31 +28,16 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelR /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(DiskEncryptionSetParameters)} does not support writing '{format}' format."); } - if (Optional.IsDefined(Code)) + if (Optional.IsDefined(Id)) { - writer.WritePropertyName("code"u8); - writer.WriteStringValue(Code); - } - if (Optional.IsDefined(Message)) - { - writer.WritePropertyName("message"u8); - writer.WriteStringValue(Message); - } - if (Optional.IsCollectionDefined(Values)) - { - writer.WritePropertyName("values"u8); - writer.WriteStartArray(); - foreach (var item in Values) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); } if (options.Format != "W" && _serializedAdditionalRawData != null) { @@ -71,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchCertificateDeleteError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + DiskEncryptionSetParameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(DiskEncryptionSetParameters)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchCertificateDeleteError(document.RootElement, options); + return DeserializeDiskEncryptionSetParameters(document.RootElement, options); } - internal static BatchCertificateDeleteError DeserializeBatchCertificateDeleteError(JsonElement element, ModelReaderWriterOptions options = null) + internal static DiskEncryptionSetParameters DeserializeDiskEncryptionSetParameters(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -91,35 +76,18 @@ internal static BatchCertificateDeleteError DeserializeBatchCertificateDeleteErr { return null; } - string code = default; - string message = default; - IReadOnlyList values = default; + ResourceIdentifier id = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { - if (property.NameEquals("code"u8)) - { - code = property.Value.GetString(); - continue; - } - if (property.NameEquals("message"u8)) - { - message = property.Value.GetString(); - continue; - } - if (property.NameEquals("values"u8)) + if (property.NameEquals("id"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) { continue; } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(NameValuePair.DeserializeNameValuePair(item, options)); - } - values = array; + id = new ResourceIdentifier(property.Value.GetString()); continue; } if (options.Format != "W") @@ -128,46 +96,46 @@ internal static BatchCertificateDeleteError 
DeserializeBatchCertificateDeleteErr } } serializedAdditionalRawData = rawDataDictionary; - return new BatchCertificateDeleteError(code, message, values ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new DiskEncryptionSetParameters(id, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(DiskEncryptionSetParameters)} does not support writing '{options.Format}' format."); } } - BatchCertificateDeleteError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + DiskEncryptionSetParameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchCertificateDeleteError(document.RootElement, options); + return DeserializeDiskEncryptionSetParameters(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(DiskEncryptionSetParameters)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchCertificateDeleteError FromResponse(Response response) + internal static DiskEncryptionSetParameters FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchCertificateDeleteError(document.RootElement); + return DeserializeDiskEncryptionSetParameters(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionSetParameters.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionSetParameters.cs new file mode 100644 index 000000000000..ba287cb6de0c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionSetParameters.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + /// The ARM resource id of the disk encryption set. 
+ public partial class DiskEncryptionSetParameters + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public DiskEncryptionSetParameters() + { + } + + /// Initializes a new instance of . + /// The ARM resource id of the disk encryption set. The resource must be in the same subscription as the Batch account. + /// Keeps track of any properties unknown to the library. + internal DiskEncryptionSetParameters(ResourceIdentifier id, IDictionary serializedAdditionalRawData) + { + Id = id; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ARM resource id of the disk encryption set. The resource must be in the same subscription as the Batch account. 
+ public ResourceIdentifier Id { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml index c8919209cc1c..4cb415a5743b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml @@ -31,7 +31,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetApplicationAsync("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); +Response response = await client.GetApplicationAsync("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -47,7 +47,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetApplication("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); +Response response = client.GetApplication("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -69,7 +69,7 @@ BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool01", "Standard_D { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64"), TargetDedicatedNodes = 2, @@ -86,13 +86,149 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, 
credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_DC2as_v5") +{ + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + SecurityProfile = new BatchVmDiskSecurityProfile + { + SecurityEncryptionType = SecurityEncryptionTypes.DiskWithVMGuestState, + }, + }, + }, + SecurityProfile = new SecurityProfile + { + SecurityType = SecurityTypes.ConfidentialVM, + UefiSettings = new BatchUefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }, + }, + }, + TargetDedicatedNodes = 1, +}; +Response response = await client.CreatePoolAsync(pool); +]]> +This sample shows how to call CreatePoolAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_D2ds_v5") +{ + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }, + }, + TargetDedicatedNodes = 1, +}; +Response response = await client.CreatePoolAsync(pool); +]]> +This sample shows how to call CreatePoolAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("dualstackpool", "Standard_D2ds_v5") +{ + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "Canonical", + Offer = "ubuntu-24_04-lts", + Sku = "server", + }, "batch.node.ubuntu 20.04"), + ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), + TargetDedicatedNodes = 1, + TargetLowPriorityNodes = 0, + NetworkConfiguration = new NetworkConfiguration + { + EndpointConfiguration = new BatchPoolEndpointConfiguration(new BatchInboundNatPool[] + { + new BatchInboundNatPool("sshpool", InboundEndpointProtocol.Tcp, 22, 40000, 40500) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1000, NetworkSecurityGroupRuleAccess.Allow, "*") + { + SourcePortRanges = {"*"}, + }}, + } + }), + PublicIpAddressConfiguration = new BatchPublicIpAddressConfiguration + { + IpFamilies = { IPFamily.IPv4, IPFamily.IPv6 }, + }, + }, +}; +Response response = await client.CreatePoolAsync(pool); +]]> +This sample shows how to call CreatePoolAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -134,13 +270,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "standard_d2s_v3") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { OsDisk = new BatchOsDisk @@ -176,31 +312,7 @@ BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_ VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "18_04-lts-gen2", - Version = "latest", - }, "batch.node.ubuntu 18.04"), - ResourceTags = - { - ["TagName1"] = "TagValue1", - ["TagName2"] = "TagValue2" - }, - TargetDedicatedNodes = 1, -}; -Response response = await client.CreatePoolAsync(pool); -]]> -This sample shows how to call CreatePoolAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") -{ - VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference - { - Publisher = "Canonical", - Offer = "UbuntuServer", + Offer = "ubuntu-24_04-lts", Sku = "18_04-lts-gen2", Version = "latest", }, "batch.node.ubuntu 18.04") @@ -225,13 +337,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -250,12 +362,12 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", + Offer = "ubuntu-24_04-lts", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { @@ -279,13 +391,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions 
pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { Extensions = {new VMExtension("batchextension1", "Microsoft.Azure.KeyVault", "KeyVaultForLinux") @@ -308,7 +420,6 @@ BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = await client.CreatePoolAsync(pool); ]]> @@ -318,13 +429,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -353,7 +464,7 @@ BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool01", "Standard_D { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64"), TargetDedicatedNodes = 2, @@ -370,13 +481,149 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new 
BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_DC2as_v5") +{ + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + SecurityProfile = new BatchVmDiskSecurityProfile + { + SecurityEncryptionType = SecurityEncryptionTypes.DiskWithVMGuestState, + }, + }, + }, + SecurityProfile = new SecurityProfile + { + SecurityType = SecurityTypes.ConfidentialVM, + UefiSettings = new BatchUefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }, + }, + }, + TargetDedicatedNodes = 1, +}; +Response response = client.CreatePool(pool); +]]> +This sample shows how to call CreatePool. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_D2ds_v5") +{ + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }, + }, + TargetDedicatedNodes = 1, +}; +Response response = client.CreatePool(pool); +]]> +This sample shows how to call CreatePool. 
+");
+TokenCredential credential = new DefaultAzureCredential();
+BatchClient client = new BatchClient(endpoint, credential);
+
+BatchPoolCreateOptions pool = new BatchPoolCreateOptions("dualstackpool", "Standard_D2ds_v5")
+{
+    VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference
+    {
+        Publisher = "Canonical",
+        Offer = "ubuntu-24_04-lts",
+        Sku = "server",
+    }, "batch.node.ubuntu 24.04"),
+    ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"),
+    TargetDedicatedNodes = 1,
+    TargetLowPriorityNodes = 0,
+    NetworkConfiguration = new NetworkConfiguration
+    {
+        EndpointConfiguration = new BatchPoolEndpointConfiguration(new BatchInboundNatPool[]
+        {
+            new BatchInboundNatPool("sshpool", InboundEndpointProtocol.Tcp, 22, 40000, 40500)
+            {
+                NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1000, NetworkSecurityGroupRuleAccess.Allow, "*")
+                {
+                    SourcePortRanges = {"*"},
+                }},
+            }
+        }),
+        PublicIpAddressConfiguration = new BatchPublicIpAddressConfiguration
+        {
+            IpFamilies = { IPFamily.IPv4, IPFamily.IPv6 },
+        },
+    },
+};
+Response response = client.CreatePool(pool);
+]]>
+This sample shows how to call CreatePool. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -418,13 +665,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "standard_d2s_v3") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { OsDisk = new BatchOsDisk @@ -460,31 +707,7 @@ BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_ VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "18_04-lts-gen2", - Version = "latest", - }, "batch.node.ubuntu 18.04"), - ResourceTags = - { - ["TagName1"] = "TagValue1", - ["TagName2"] = "TagValue2" - }, - TargetDedicatedNodes = 1, -}; -Response response = client.CreatePool(pool); -]]> -This sample shows how to call CreatePool. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") -{ - VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference - { - Publisher = "Canonical", - Offer = "UbuntuServer", + Offer = "ubuntu-24_04-lts", Sku = "18_04-lts-gen2", Version = "latest", }, "batch.node.ubuntu 18.04") @@ -509,13 +732,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -534,12 +757,12 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", + Offer = "ubuntu-24_04-lts", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { @@ -563,13 +786,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") +BatchPoolCreateOptions 
pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { Extensions = {new VMExtension("batchextension1", "Microsoft.Azure.KeyVault", "KeyVaultForLinux") @@ -592,7 +815,6 @@ BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = client.CreatePool(pool); ]]> @@ -602,13 +824,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -641,7 +863,7 @@ using RequestContent content = RequestContent.Create(new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -664,23 +886,210 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { - id = "pool2", - vmSize = "standard_a1", + id = "pool", + vmSize = "Standard_DC2as_v5", virtualMachineConfiguration = new { 
imageReference = new { - publisher = "Canonical", - offer = "UbuntuServer", - sku = "20_04-lts", + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2019-datacenter-core-g2", + version = "latest", }, - nodeAgentSKUId = "batch.node.ubuntu 20.04", - }, - mountConfiguration = new object[] - { - new + osDisk = new { - azureBlobFileSystemConfiguration = new + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + securityProfile = new + { + securityEncryptionType = "DiskWithVMGuestState", + }, + }, + }, + dataDisks = new object[] + { + new + { + lun = 0, + diskSizeGB = 1024, + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, + } + }, + securityProfile = new + { + securityType = "confidentialVM", + uefiSettings = new + { + vTpmEnabled = true, + secureBootEnabled = true, + }, + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 1, +}); +Response response = await client.CreatePoolAsync(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreatePoolAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "pool", + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2019-datacenter-core-g2", + version = "latest", + }, + osDisk = new + { + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, + }, + dataDisks = new object[] + { + new + { + lun = 0, + diskSizeGB = 1024, + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, + } + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 1, +}); +Response response = await client.CreatePoolAsync(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreatePoolAsync. 
+");
+TokenCredential credential = new DefaultAzureCredential();
+BatchClient client = new BatchClient(endpoint, credential);
+
+using RequestContent content = RequestContent.Create(new
+{
+    id = "dualstackpool",
+    vmSize = "Standard_D2ds_v5",
+    virtualMachineConfiguration = new
+    {
+        imageReference = new
+        {
+            publisher = "Canonical",
+            offer = "ubuntu-24_04-lts",
+            sku = "server",
+        },
+        nodeAgentSKUId = "batch.node.ubuntu 24.04",
+    },
+    networkConfiguration = new
+    {
+        publicIPAddressConfiguration = new
+        {
+            ipFamilies = new object[]
+            {
+                "IPv4",
+                "IPv6"
+            },
+        },
+        endpointConfiguration = new
+        {
+            inboundNATPools = new object[]
+            {
+                new
+                {
+                    backendPort = 22,
+                    frontendPortRangeStart = 40000,
+                    frontendPortRangeEnd = 40500,
+                    name = "sshpool",
+                    protocol = "tcp",
+                    networkSecurityGroupRules = new object[]
+                    {
+                        new
+                        {
+                            access = "allow",
+                            priority = 1000,
+                            sourceAddressPrefix = "*",
+                            sourcePortRanges = new object[]
+                            {
+                                "*"
+                            },
+                        }
+                    },
+                }
+            },
+        },
+    },
+    resizeTimeout = "PT15M",
+    targetDedicatedNodes = 1,
+    targetLowPriorityNodes = 0,
+});
+Response response = await client.CreatePoolAsync(content);
+
+Console.WriteLine(response.Status);
+]]>
+This sample shows how to call CreatePoolAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "pool2", + vmSize = "Standard_D4d_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", + }, + nodeAgentSKUId = "batch.node.ubuntu 20.04", + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new { accountName = "accountName", containerName = "blobContainerName", @@ -743,14 +1152,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "mypool001", - vmSize = "standard_d2s_v3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, osDisk = new { @@ -792,39 +1201,7 @@ using RequestContent content = RequestContent.Create(new imageReference = new { publisher = "Canonical", - offer = "UbuntuServer", - sku = "18_04-lts-gen2", - version = "latest", - }, - nodeAgentSKUId = "batch.node.ubuntu 18.04", - }, - targetDedicatedNodes = 1, - resourceTags = new - { - TagName1 = "TagValue1", - TagName2 = "TagValue2", - }, -}); -Response response = await client.CreatePoolAsync(content); - -Console.WriteLine(response.Status); -]]> -This sample shows how to call CreatePoolAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - id = "mypool001", - vmSize = "STANDARD_DC2s_V2", - virtualMachineConfiguration = new - { - imageReference = new - { - publisher = "Canonical", - offer = "UbuntuServer", + offer = "ubuntu-24_04-lts", sku = "18_04-lts-gen2", version = "latest", }, @@ -854,14 +1231,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", }, @@ -897,13 +1274,13 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", + offer = "ubuntu-24_04-lts", sku = "120_04-lts", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", @@ -939,14 +1316,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", extensions = new object[] @@ -985,13 +1362,148 @@ using RequestContent content = RequestContent.Create(new value = "myvalue", } }, - targetNodeCommunicationMode = "simplified", }); -Response response = await 
client.CreatePoolAsync(content); +Response response = await client.CreatePoolAsync(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreatePoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "mypool002", + vmSize = "Standard_D4d_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2025-datacenter-smalldisk", + version = "latest", + }, + windowsConfiguration = new + { + enableAutomaticUpdates = false, + }, + serviceArtifactReference = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/galleries/myGallery/serviceArtifacts/myServiceArtifact/vmArtifactsProfiles/vmArtifactsProfile", + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 2, +}); +Response response = await client.CreatePoolAsync(content); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreatePool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "mypool01", + vmSize = "Standard_D1_v2", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2025-datacenter-smalldisk", + version = "latest", + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 2, + networkConfiguration = new + { + enableAcceleratedNetworking = true, + }, +}); +Response response = client.CreatePool(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreatePool. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "pool", + vmSize = "Standard_DC2as_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2019-datacenter-core-g2", + version = "latest", + }, + osDisk = new + { + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + securityProfile = new + { + securityEncryptionType = "DiskWithVMGuestState", + }, + }, + }, + dataDisks = new object[] + { + new + { + lun = 0, + diskSizeGB = 1024, + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, + } + }, + securityProfile = new + { + securityType = "confidentialVM", + uefiSettings = new + { + vTpmEnabled = true, + secureBootEnabled = true, + }, + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 1, +}); +Response response = client.CreatePool(content); Console.WriteLine(response.Status); ]]> -This sample shows how to call CreatePoolAsync. +This sample shows how to call CreatePool. 
"); TokenCredential credential = new DefaultAzureCredential(); @@ -999,36 +1511,52 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { - id = "mypool002", - vmSize = "Standard_A1_v2", + id = "pool", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2019-datacenter-core-g2", version = "latest", }, - windowsConfiguration = new + osDisk = new { - enableAutomaticUpdates = false, + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, }, - serviceArtifactReference = new + dataDisks = new object[] { - id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/galleries/myGallery/serviceArtifacts/myServiceArtifact/vmArtifactsProfiles/vmArtifactsProfile", + new + { + lun = 0, + diskSizeGB = 1024, + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, + } }, nodeAgentSKUId = "batch.node.windows amd64", }, - targetDedicatedNodes = 2, + targetDedicatedNodes = 1, }); -Response response = await client.CreatePoolAsync(content); +Response response = client.CreatePool(content); Console.WriteLine(response.Status); -]]> - - - +]]> This sample shows how to call CreatePool. 
"); @@ -1037,24 +1565,59 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { - id = "mypool01", - vmSize = "Standard_D1_v2", + id = "dualstackpool", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { - publisher = "MicrosoftWindowsServer", - offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", - version = "latest", + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", }, - nodeAgentSKUId = "batch.node.windows amd64", + nodeAgentSKUId = "batch.node.ubuntu 20.04", }, - targetDedicatedNodes = 2, networkConfiguration = new { - enableAcceleratedNetworking = true, + publicIPAddressConfiguration = new + { + ipFamilies = new object[] + { + "IPv4", + "IPv6" + }, + }, + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + backendPort = 22, + frontendPortRangeStart = 40000, + frontendPortRangeEnd = 40500, + name = "sshpool", + protocol = "tcp", + networkSecurityGroupRules = new object[] + { + new + { + access = "allow", + priority = 1000, + sourceAddressPrefix = "*", + sourcePortRanges = new object[] + { + "*" + }, + } + }, + } + }, + }, }, + resizeTimeout = "PT15M", + targetDedicatedNodes = 1, + targetLowPriorityNodes = 0, }); Response response = client.CreatePool(content); @@ -1069,14 +1632,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "UbuntuServer", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", }, @@ -1147,14 +1710,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "mypool001", - vmSize = "standard_d2s_v3", + vmSize = 
"Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, osDisk = new { @@ -1196,39 +1759,7 @@ using RequestContent content = RequestContent.Create(new imageReference = new { publisher = "Canonical", - offer = "UbuntuServer", - sku = "18_04-lts-gen2", - version = "latest", - }, - nodeAgentSKUId = "batch.node.ubuntu 18.04", - }, - targetDedicatedNodes = 1, - resourceTags = new - { - TagName1 = "TagValue1", - TagName2 = "TagValue2", - }, -}); -Response response = client.CreatePool(content); - -Console.WriteLine(response.Status); -]]> -This sample shows how to call CreatePool. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - id = "mypool001", - vmSize = "STANDARD_DC2s_V2", - virtualMachineConfiguration = new - { - imageReference = new - { - publisher = "Canonical", - offer = "UbuntuServer", + offer = "ubuntu-24_04-lts", sku = "18_04-lts-gen2", version = "latest", }, @@ -1258,14 +1789,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", }, @@ -1301,13 +1832,13 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", + offer = 
"ubuntu-24_04-lts", sku = "120_04-lts", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", @@ -1343,14 +1874,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", extensions = new object[] @@ -1389,7 +1920,6 @@ using RequestContent content = RequestContent.Create(new value = "myvalue", } }, - targetNodeCommunicationMode = "simplified", }); Response response = client.CreatePool(content); @@ -1404,14 +1934,14 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { id = "mypool002", - vmSize = "Standard_A1_v2", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, windowsConfiguration = new @@ -1455,6 +1985,30 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); +Response response = await client.GetPoolAsync("pool"); +]]> +This sample shows how to call GetPoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("pool"); +]]> +This sample shows how to call GetPoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("pool"); +]]> +This sample shows how to call GetPoolAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + Response response = await client.GetPoolAsync("mypool001"); ]]> This sample shows how to call GetPoolAsync. @@ -1506,6 +2060,30 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); +Response response = client.GetPool("pool"); +]]> +This sample shows how to call GetPool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetPool("pool"); +]]> +This sample shows how to call GetPool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetPool("pool"); +]]> +This sample shows how to call GetPool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + Response response = client.GetPool("mypool001"); ]]> This sample shows how to call GetPool. @@ -1552,7 +2130,40 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetPoolAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetPoolAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetPoolAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1574,7 +2185,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1621,7 +2232,40 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = 
new BatchClient(endpoint, credential); -Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetPool and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetPool and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetPool and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1643,7 +2287,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1688,6 +2332,47 @@ using RequestContent content = RequestContent.Create(new }); Response response = await client.UpdatePoolAsync("poolId", content); +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdatePoolAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", + version = "latest", + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk", + "temporarydisk" + }, + customerManagedKey = new + { + keyUrl = "https:///keys//", + identityReference = new + { + resourceId = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1", + }, + }, + }, + nodeAgentSKUId = "batch.node.ubuntu 20.04", + }, +}); +Response response = await client.UpdatePoolAsync("poolId", content); + Console.WriteLine(response.Status); ]]> @@ -1708,6 +2393,47 @@ using RequestContent content = RequestContent.Create(new }); Response response = client.UpdatePool("poolId", content); +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdatePool. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", + version = "latest", + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk", + "temporarydisk" + }, + customerManagedKey = new + { + keyUrl = "https:///keys//", + identityReference = new + { + resourceId = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1", + }, + }, + }, + nodeAgentSKUId = "batch.node.ubuntu 20.04", + }, +}); +Response response = client.UpdatePool("poolId", content); + Console.WriteLine(response.Status); ]]> @@ -1873,7 +2599,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) +BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -1888,7 +2614,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) +BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -1909,7 +2635,6 @@ using RequestContent content = RequestContent.Create(new { commandLine = "/bin/bash -c 'echo start task'", }, - certificateReferences = Array.Empty(), applicationPackageReferences = Array.Empty(), metadata 
= Array.Empty(), }); @@ -1932,7 +2657,6 @@ using RequestContent content = RequestContent.Create(new { commandLine = "/bin/bash -c 'echo start task'", }, - certificateReferences = Array.Empty(), applicationPackageReferences = Array.Empty(), metadata = Array.Empty(), }); @@ -1971,7 +2695,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetJobAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = await client.GetJobAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -1985,7 +2709,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetJob("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = client.GetJob("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -2169,13 +2893,13 @@ BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = 
"latest", }, "batch.node.windows amd64") { @@ -2217,14 +2941,7 @@ BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo MaxTaskRetryCount = 2, WaitForSuccess = true, }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") - { - StoreLocation = BatchCertificateStoreLocation.LocalMachine, - StoreName = "Root", - Visibility = {BatchCertificateVisibility.Task}, - }}, Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, }) @@ -2301,13 +3018,13 @@ BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -2349,14 +3066,7 @@ BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo MaxTaskRetryCount = 2, WaitForSuccess = true, }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") - { - StoreLocation = BatchCertificateStoreLocation.LocalMachine, - StoreName = "Root", - Visibility = {BatchCertificateVisibility.Task}, - }}, Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, }) @@ -2492,14 +3202,14 @@ using RequestContent content = RequestContent.Create(new poolLifetimeOption = "job", pool = new { - vmSize = "STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher 
= "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -2552,20 +3262,6 @@ using RequestContent content = RequestContent.Create(new maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { - new - { - thumbprint = "0123456789abcdef0123456789abcdef01234567", - thumbprintAlgorithm = "sha1", - storeLocation = "localmachine", - storeName = "Root", - visibility = new object[] - { - "task" - }, - } - }, metadata = new object[] { new @@ -2574,7 +3270,6 @@ using RequestContent content = RequestContent.Create(new value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -2679,14 +3374,14 @@ using RequestContent content = RequestContent.Create(new poolLifetimeOption = "job", pool = new { - vmSize = "STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -2739,20 +3434,6 @@ using RequestContent content = RequestContent.Create(new maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { - new - { - thumbprint = "0123456789abcdef0123456789abcdef01234567", - thumbprintAlgorithm = "sha1", - storeLocation = "localmachine", - storeName = "Root", - visibility = new object[] - { - "task" - }, - } - }, metadata = new object[] { new @@ -2761,7 +3442,6 @@ using RequestContent content = RequestContent.Create(new value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -2809,7 +3489,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await 
client.GetJobTaskCountsAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); +Response response = await client.GetJobTaskCountsAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); @@ -2832,7 +3512,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetJobTaskCounts("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); +Response response = client.GetJobTaskCounts("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); @@ -2845,160 +3525,6 @@ Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").To Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); -]]> - - - -This sample shows how to call CreateCertificateAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) -{ - CertificateFormat = BatchCertificateFormat.Pfx, - Password = "", -}; -Response response = await client.CreateCertificateAsync(certificate); -]]> - - - -This sample shows how to call CreateCertificate. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) -{ - CertificateFormat = BatchCertificateFormat.Pfx, - Password = "", -}; -Response response = client.CreateCertificate(certificate); -]]> - - - -This sample shows how to call CreateCertificateAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - thumbprintAlgorithm = "sha1", - thumbprint = "0123456789abcdef0123456789abcdef01234567", - data = "U3dhZ2dlciByb2Nrcw==", - certificateFormat = "pfx", - password = "", -}); -Response response = await client.CreateCertificateAsync(content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call CreateCertificate. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - thumbprintAlgorithm = "sha1", - thumbprint = "0123456789abcdef0123456789abcdef01234567", - data = "U3dhZ2dlciByb2Nrcw==", - certificateFormat = "pfx", - password = "", -}); -Response response = client.CreateCertificate(content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call CancelCertificateDeletionAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.CancelCertificateDeletionAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call CancelCertificateDeletion. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.CancelCertificateDeletion("sha1", "0123456789abcdef0123456789abcdef01234567"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call GetCertificateAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); -]]> - - - -This sample shows how to call GetCertificate. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); -]]> - - - -This sample shows how to call GetCertificateAsync and parse the result. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); - -JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; -Console.WriteLine(result.GetProperty("thumbprint").ToString()); -Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); -Console.WriteLine(result.GetProperty("data").ToString()); -]]> - - - -This sample shows how to call GetCertificate and parse the result. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); - -JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; -Console.WriteLine(result.GetProperty("thumbprint").ToString()); -Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); -Console.WriteLine(result.GetProperty("data").ToString()); ]]> @@ -3031,7 +3557,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetJobScheduleAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = await client.GetJobScheduleAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); @@ -3045,7 +3571,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetJobSchedule("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = client.GetJobSchedule("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); @@ -3324,8 +3850,8 @@ BatchClient client = new BatchClient(endpoint, credential); BatchJobScheduleCreateOptions jobSchedule = new 
BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { - DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), - DoNotRunAfter = DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), + DoNotRunUntil = DateTimeOffset.Parse("2025-09-10T02:30:00.000Z"), + DoNotRunAfter = DateTimeOffset.Parse("2025-09-10T06:30:00.000Z"), StartWindow = XmlConvert.ToTimeSpan("PT1M"), RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -3333,13 +3859,13 @@ BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("j AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -3381,14 +3907,7 @@ BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("j MaxTaskRetryCount = 2, WaitForSuccess = true, }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") - { - StoreLocation = BatchCertificateStoreLocation.LocalMachine, - StoreName = "Root", - Visibility = {BatchCertificateVisibility.Task}, - }}, Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, }) @@ -3464,8 +3983,8 @@ BatchClient client = new BatchClient(endpoint, credential); BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { - DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), - DoNotRunAfter = 
DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), + DoNotRunUntil = DateTimeOffset.Parse("2025-09-10T02:30:00.000Z"), + DoNotRunAfter = DateTimeOffset.Parse("2025-09-10T06:30:00.000Z"), StartWindow = XmlConvert.ToTimeSpan("PT1M"), RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -3473,13 +3992,13 @@ BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("j AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -3521,14 +4040,7 @@ BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("j MaxTaskRetryCount = 2, WaitForSuccess = true, }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") - { - StoreLocation = BatchCertificateStoreLocation.LocalMachine, - StoreName = "Root", - Visibility = {BatchCertificateVisibility.Task}, - }}, Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, }) @@ -3617,8 +4129,8 @@ using RequestContent content = RequestContent.Create(new id = "jobScheduleId", schedule = new { - doNotRunUntil = "2014-09-10T02:30:00.000Z", - doNotRunAfter = "2014-09-10T06:30:00.000Z", + doNotRunUntil = "2025-09-10T02:30:00.000Z", + doNotRunAfter = "2025-09-10T06:30:00.000Z", startWindow = "PT1M", recurrenceInterval = "PT5M", }, @@ -3681,14 +4193,14 @@ using RequestContent content = RequestContent.Create(new poolLifetimeOption = "jobschedule", pool = new { - vmSize = 
"STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -3741,20 +4253,6 @@ using RequestContent content = RequestContent.Create(new maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { - new - { - thumbprint = "0123456789abcdef0123456789abcdef01234567", - thumbprintAlgorithm = "sha1", - storeLocation = "localmachine", - storeName = "Root", - visibility = new object[] - { - "task" - }, - } - }, metadata = new object[] { new @@ -3763,7 +4261,6 @@ using RequestContent content = RequestContent.Create(new value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -3820,8 +4317,8 @@ using RequestContent content = RequestContent.Create(new id = "jobScheduleId", schedule = new { - doNotRunUntil = "2014-09-10T02:30:00.000Z", - doNotRunAfter = "2014-09-10T06:30:00.000Z", + doNotRunUntil = "2025-09-10T02:30:00.000Z", + doNotRunAfter = "2025-09-10T06:30:00.000Z", startWindow = "PT1M", recurrenceInterval = "PT5M", }, @@ -3884,14 +4381,14 @@ using RequestContent content = RequestContent.Create(new poolLifetimeOption = "jobschedule", pool = new { - vmSize = "STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -3944,20 +4441,6 @@ using RequestContent content = RequestContent.Create(new maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { - new - { - thumbprint = "0123456789abcdef0123456789abcdef01234567", - thumbprintAlgorithm = "sha1", - storeLocation = 
"localmachine", - storeName = "Root", - visibility = new object[] - { - "task" - }, - } - }, metadata = new object[] { new @@ -3966,7 +4449,6 @@ using RequestContent content = RequestContent.Create(new value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -4977,7 +5459,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetTaskAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = await client.GetTaskAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -4991,7 +5473,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetTask("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); +Response response = client.GetTask("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5189,7 +5671,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetTaskFileAsync("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null); +Response response = await client.GetTaskFileAsync("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); JsonElement result = 
JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5203,7 +5685,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetTaskFile("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null); +Response response = client.GetTaskFile("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5220,7 +5702,7 @@ BatchClient client = new BatchClient(endpoint, credential); BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, - ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), + ExpiryTime = DateTimeOffset.Parse("2025-08-01T00:00:00Z"), Password = "Password", }; Response response = await client.CreateNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", user); @@ -5237,7 +5719,7 @@ BatchClient client = new BatchClient(endpoint, credential); BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, - ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), + ExpiryTime = DateTimeOffset.Parse("2025-08-01T00:00:00Z"), Password = "Password", }; Response response = client.CreateNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", user); @@ -5255,7 +5737,7 @@ using RequestContent content = RequestContent.Create(new { name = "userName", isAdmin = false, - expiryTime = "2017-08-01T00:00:00Z", + expiryTime = "2025-08-01T00:00:00Z", password = "Password", }); Response response = await client.CreateNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5275,7 +5757,7 @@ using RequestContent content = RequestContent.Create(new { name = "userName", isAdmin = false, - 
expiryTime = "2017-08-01T00:00:00Z", + expiryTime = "2025-08-01T00:00:00Z", password = "Password", }); Response response = client.CreateNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5320,7 +5802,7 @@ BatchClient client = new BatchClient(endpoint, credential); BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", - ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), + ExpiryTime = DateTimeOffset.Parse("2025-11-27T00:45:48.7320857Z"), }; Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", updateOptions); ]]> @@ -5336,7 +5818,7 @@ BatchClient client = new BatchClient(endpoint, credential); BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", - ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), + ExpiryTime = DateTimeOffset.Parse("2025-11-27T00:45:48.7320857Z"), }; Response response = client.ReplaceNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", "userName", updateOptions); ]]> @@ -5352,7 +5834,7 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { password = "12345", - expiryTime = "2016-11-27T00:45:48.7320857Z", + expiryTime = "2025-11-27T00:45:48.7320857Z", }); Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); @@ -5370,7 +5852,7 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { password = "12345", - expiryTime = "2016-11-27T00:45:48.7320857Z", + expiryTime = "2025-11-27T00:45:48.7320857Z", }); Response response = client.ReplaceNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); @@ -5386,6 +5868,14 @@ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); Response 
response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z"); +]]> +This sample shows how to call GetNodeAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); ]]> @@ -5397,6 +5887,14 @@ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); Response response = client.GetNode("poolId", "tvm-1695681911_2-20161122t193202z"); +]]> +This sample shows how to call GetNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNode("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); ]]> @@ -5407,7 +5905,18 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); +Response response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetNodeAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:55:44 GMT"), null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5421,7 +5930,18 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetNode("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); +Response response = client.GetNode("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetNode and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNode("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:55:44 GMT"), null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5512,6 +6032,14 @@ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); Response response = await client.GetNodeRemoteLoginSettingsAsync("poolId", "tvm-1695681911_1-20161121t182739z"); +]]> +This sample shows how to call GetNodeRemoteLoginSettingsAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeRemoteLoginSettingsAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); ]]> @@ -5523,6 +6051,14 @@ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); Response response = client.GetNodeRemoteLoginSettings("poolId", "tvm-1695681911_1-20161121t182739z"); +]]> +This sample shows how to call GetNodeRemoteLoginSettings. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeRemoteLoginSettings("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); ]]> @@ -5533,7 +6069,19 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetNodeRemoteLoginSettingsAsync("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); +Response response = await client.GetNodeRemoteLoginSettingsAsync("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); +Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); +]]> +This sample shows how to call GetNodeRemoteLoginSettingsAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeRemoteLoginSettingsAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:52:43 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); @@ -5548,7 +6096,19 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetNodeRemoteLoginSettings("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); +Response response = client.GetNodeRemoteLoginSettings("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); +Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); +]]> +This sample shows how to call GetNodeRemoteLoginSettings and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeRemoteLoginSettings("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:52:43 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); @@ -5563,7 +6123,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); +UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig"), DateTimeOffset.Parse("2025-11-27T00:00:00Z")); Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", uploadOptions); ]]> @@ -5575,7 +6135,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); +UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig"), 
DateTimeOffset.Parse("2025-11-27T00:00:00Z")); Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", uploadOptions); ]]> @@ -5589,8 +6149,8 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { - containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", - startTime = "2017-11-27T00:00:00Z", + containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig", + startTime = "2025-11-27T00:00:00Z", }); Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5609,8 +6169,8 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = RequestContent.Create(new { - containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", - startTime = "2017-11-27T00:00:00Z", + containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig", + startTime = "2025-11-27T00:00:00Z", }); Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5649,7 +6209,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetNodeExtensionAsync("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); +Response response = await client.GetNodeExtensionAsync("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); JsonElement result = 
JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5663,7 +6223,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetNodeExtension("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); +Response response = client.GetNodeExtension("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5725,7 +6285,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = await client.GetNodeFileAsync("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null); +Response response = await client.GetNodeFileAsync("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5739,7 +6299,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -Response response = client.GetNodeFile("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null); +Response response = client.GetNodeFile("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); 
JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5779,7 +6339,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetApplicationsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) +await foreach (BinaryData item in client.GetApplicationsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -5796,7 +6356,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetApplications(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) +foreach (BinaryData item in client.GetApplications(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -5839,7 +6399,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -5858,7 +6418,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new 
BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetPoolUsageMetrics(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +foreach (BinaryData item in client.GetPoolUsageMetrics(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -5903,7 +6463,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetPoolsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +await foreach (BinaryData item in client.GetPoolsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -5918,7 +6478,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetPools(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +foreach (BinaryData item in client.GetPools(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -5959,7 +6519,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetSupportedImagesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) +await foreach (BinaryData item in client.GetSupportedImagesAsync(null, 
DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); @@ -5977,7 +6537,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetSupportedImages(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) +foreach (BinaryData item in client.GetSupportedImages(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); @@ -6021,7 +6581,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetPoolNodeCountsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) +await foreach (BinaryData item in client.GetPoolNodeCountsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -6036,7 +6596,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetPoolNodeCounts(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) +foreach (BinaryData item in client.GetPoolNodeCounts(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -6077,7 +6637,7 @@ Uri endpoint = new 
Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetJobsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +await foreach (BinaryData item in client.GetJobsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6092,7 +6652,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetJobs(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +foreach (BinaryData item in client.GetJobs(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6133,7 +6693,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetJobsFromSchedulesAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +await foreach (BinaryData item in client.GetJobsFromSchedulesAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6148,7 +6708,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in 
client.GetJobsFromSchedules("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +foreach (BinaryData item in client.GetJobsFromSchedules("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6189,7 +6749,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatusesAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) +await foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatusesAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6204,71 +6764,11 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatuses("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) +foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatuses("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); } -]]> - - - -This sample shows how to call GetCertificatesAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -await foreach (BatchCertificate item in client.GetCertificatesAsync()) -{ -} -]]> - - - -This sample shows how to call GetCertificates. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -foreach (BatchCertificate item in client.GetCertificates()) -{ -} -]]> - - - -This sample shows how to call GetCertificatesAsync and parse the result. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -await foreach (BinaryData item in client.GetCertificatesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) -{ - JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; - Console.WriteLine(result.GetProperty("thumbprint").ToString()); - Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); - Console.WriteLine(result.GetProperty("data").ToString()); -} -]]> - - - -This sample shows how to call GetCertificates and parse the result. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -foreach (BinaryData item in client.GetCertificates(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) -{ - JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; - Console.WriteLine(result.GetProperty("thumbprint").ToString()); - Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); - Console.WriteLine(result.GetProperty("data").ToString()); -} ]]> @@ -6305,7 +6805,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetJobSchedulesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +await foreach (BinaryData item in client.GetJobSchedulesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); @@ -6320,7 +6820,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetJobSchedules(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +foreach (BinaryData item in client.GetJobSchedules(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); @@ -6361,7 +6861,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await 
foreach (BinaryData item in client.GetTasksAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +await foreach (BinaryData item in client.GetTasksAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6376,7 +6876,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetTasks("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) +foreach (BinaryData item in client.GetTasks("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6417,7 +6917,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetSubTasksAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) +await foreach (BinaryData item in client.GetSubTasksAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6432,7 +6932,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetSubTasks("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) +foreach (BinaryData item in client.GetSubTasks("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 
00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6473,7 +6973,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetTaskFilesAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) +await foreach (BinaryData item in client.GetTaskFilesAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6488,7 +6988,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetTaskFiles("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) +foreach (BinaryData item in client.GetTaskFiles("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6529,7 +7029,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetNodesAsync("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) +await foreach (BinaryData item in client.GetNodesAsync("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6544,7 +7044,7 @@ Uri endpoint = new Uri(""); TokenCredential credential 
= new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetNodes("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) +foreach (BinaryData item in client.GetNodes("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6585,7 +7085,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in client.GetNodeExtensionsAsync("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) +await foreach (BinaryData item in client.GetNodeExtensionsAsync("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6600,7 +7100,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetNodeExtensions("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) +foreach (BinaryData item in client.GetNodeExtensions("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6641,7 +7141,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -await foreach (BinaryData item in 
client.GetNodeFilesAsync("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) +await foreach (BinaryData item in client.GetNodeFilesAsync("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Mon, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6656,7 +7156,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -foreach (BinaryData item in client.GetNodeFiles("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) +foreach (BinaryData item in client.GetNodeFiles("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Mon, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettings.Serialization.cs new file mode 100644 index 000000000000..6bb613d6ebea --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettings.Serialization.cs @@ -0,0 +1,160 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class HostEndpointSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(HostEndpointSettings)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(InVmAccessControlProfileReferenceId)) + { + writer.WritePropertyName("inVMAccessControlProfileReferenceId"u8); + writer.WriteStringValue(InVmAccessControlProfileReferenceId); + } + if (Optional.IsDefined(Mode)) + { + writer.WritePropertyName("mode"u8); + writer.WriteStringValue(Mode.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + HostEndpointSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(HostEndpointSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeHostEndpointSettings(document.RootElement, options); + } + + internal static HostEndpointSettings DeserializeHostEndpointSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string inVMAccessControlProfileReferenceId = default; + HostEndpointSettingsModeTypes? mode = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("inVMAccessControlProfileReferenceId"u8)) + { + inVMAccessControlProfileReferenceId = property.Value.GetString(); + continue; + } + if (property.NameEquals("mode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + mode = new HostEndpointSettingsModeTypes(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new HostEndpointSettings(inVMAccessControlProfileReferenceId, mode, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); + default: + throw new FormatException($"The model {nameof(HostEndpointSettings)} does not support writing '{options.Format}' format."); + } + } + + HostEndpointSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeHostEndpointSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(HostEndpointSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static HostEndpointSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeHostEndpointSettings(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettings.cs new file mode 100644 index 000000000000..98b9190829d9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettings.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies particular host endpoint settings. + public partial class HostEndpointSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public HostEndpointSettings() + { + } + + /// Initializes a new instance of . + /// Specifies the reference to the InVMAccessControlProfileVersion resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/inVMAccessControlProfiles/{profile}/versions/{version}. + /// Specifies the access control policy execution mode. + /// Keeps track of any properties unknown to the library. + internal HostEndpointSettings(string inVmAccessControlProfileReferenceId, HostEndpointSettingsModeTypes? 
mode, IDictionary serializedAdditionalRawData) + { + InVmAccessControlProfileReferenceId = inVmAccessControlProfileReferenceId; + Mode = mode; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Specifies the reference to the InVMAccessControlProfileVersion resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/inVMAccessControlProfiles/{profile}/versions/{version}. + public string InVmAccessControlProfileReferenceId { get; set; } + /// Specifies the access control policy execution mode. + public HostEndpointSettingsModeTypes? Mode { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettingsModeTypes.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettingsModeTypes.cs new file mode 100644 index 000000000000..11680fe20723 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/HostEndpointSettingsModeTypes.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// HostEndpointSettingsModeTypes enums. + public readonly partial struct HostEndpointSettingsModeTypes : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public HostEndpointSettingsModeTypes(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AuditValue = "Audit"; + private const string EnforceValue = "Enforce"; + + /// In Audit mode, the system acts as if it is enforcing the access control policy, including emitting access denial entries in the logs but it does not actually deny any requests to host endpoints. 
+ public static HostEndpointSettingsModeTypes Audit { get; } = new HostEndpointSettingsModeTypes(AuditValue); + /// Enforce mode is the recommended mode of operation and system will enforce the access control policy. This property cannot be used together with 'inVMAccessControlProfileReferenceId'. + public static HostEndpointSettingsModeTypes Enforce { get; } = new HostEndpointSettingsModeTypes(EnforceValue); + /// Determines if two values are the same. + public static bool operator ==(HostEndpointSettingsModeTypes left, HostEndpointSettingsModeTypes right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(HostEndpointSettingsModeTypes left, HostEndpointSettingsModeTypes right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator HostEndpointSettingsModeTypes(string value) => new HostEndpointSettingsModeTypes(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is HostEndpointSettingsModeTypes other && Equals(other); + /// + public bool Equals(HostEndpointSettingsModeTypes other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/IPFamily.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/IPFamily.cs new file mode 100644 index 000000000000..8e0c315db4f3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/IPFamily.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// The IP families used to specify IP versions available to the pool. + public readonly partial struct IPFamily : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public IPFamily(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string IPv4Value = "IPv4"; + private const string IPv6Value = "IPv6"; + + /// IPv4 is available to the pool. + public static IPFamily IPv4 { get; } = new IPFamily(IPv4Value); + /// IPv6 is available to the pool. + public static IPFamily IPv6 { get; } = new IPFamily(IPv6Value); + /// Determines if two values are the same. + public static bool operator ==(IPFamily left, IPFamily right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(IPFamily left, IPFamily right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator IPFamily(string value) => new IPFamily(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is IPFamily other && Equals(other); + /// + public bool Equals(IPFamily other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/IPTag.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/IPTag.Serialization.cs new file mode 100644 index 000000000000..79b83a121636 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/IPTag.Serialization.cs @@ -0,0 +1,156 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class IPTag : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(IPTag)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(IpTagType)) + { + writer.WritePropertyName("ipTagType"u8); + writer.WriteStringValue(IpTagType); + } + if (Optional.IsDefined(Tag)) + { + writer.WritePropertyName("tag"u8); + writer.WriteStringValue(Tag); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + IPTag IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(IPTag)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeIPTag(document.RootElement, options); + } + + internal static IPTag DeserializeIPTag(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string ipTagType = default; + string tag = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("ipTagType"u8)) + { + ipTagType = property.Value.GetString(); + continue; + } + if (property.NameEquals("tag"u8)) + { + tag = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new IPTag(ipTagType, tag, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); + default: + throw new FormatException($"The model {nameof(IPTag)} does not support writing '{options.Format}' format."); + } + } + + IPTag IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeIPTag(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(IPTag)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static IPTag FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeIPTag(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/IPTag.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/IPTag.cs new file mode 100644 index 000000000000..12377da97a14 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/IPTag.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Contains the IP tag associated with the public IP address. + public partial class IPTag + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public IPTag() + { + } + + /// Initializes a new instance of . + /// The IP Tag type. Example: FirstPartyUsage. + /// The value of the IP tag associated with the public IP. Example: SQL. + /// Keeps track of any properties unknown to the library. + internal IPTag(string ipTagType, string tag, IDictionary serializedAdditionalRawData) + { + IpTagType = ipTagType; + Tag = tag; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The IP Tag type. Example: FirstPartyUsage. + public string IpTagType { get; set; } + /// The value of the IP tag associated with the public IP. Example: SQL. 
+ public string Tag { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs index 61fd2056f8e2..3a8f644ae69e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs @@ -34,6 +34,11 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit throw new FormatException($"The model {nameof(ManagedDisk)} does not support writing '{format}' format."); } + if (Optional.IsDefined(DiskEncryptionSet)) + { + writer.WritePropertyName("diskEncryptionSet"u8); + writer.WriteObjectValue(DiskEncryptionSet, options); + } if (Optional.IsDefined(StorageAccountType)) { writer.WritePropertyName("storageAccountType"u8); @@ -81,12 +86,22 @@ internal static ManagedDisk DeserializeManagedDisk(JsonElement element, ModelRea { return null; } + DiskEncryptionSetParameters diskEncryptionSet = default; StorageAccountType? 
storageAccountType = default; BatchVmDiskSecurityProfile securityProfile = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("diskEncryptionSet"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + diskEncryptionSet = DiskEncryptionSetParameters.DeserializeDiskEncryptionSetParameters(property.Value, options); + continue; + } if (property.NameEquals("storageAccountType"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -111,7 +126,7 @@ internal static ManagedDisk DeserializeManagedDisk(JsonElement element, ModelRea } } serializedAdditionalRawData = rawDataDictionary; - return new ManagedDisk(storageAccountType, securityProfile, serializedAdditionalRawData); + return new ManagedDisk(diskEncryptionSet, storageAccountType, securityProfile, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs index 367915fe8514..a1e0e48f052b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs @@ -51,16 +51,20 @@ public ManagedDisk() } /// Initializes a new instance of . + /// Specifies the customer managed disk encryption set resource id for the managed disk. It can be set only in UserSubscription mode. /// The storage account type for managed disk. /// Specifies the security profile settings for the managed disk. /// Keeps track of any properties unknown to the library. - internal ManagedDisk(StorageAccountType? storageAccountType, BatchVmDiskSecurityProfile securityProfile, IDictionary serializedAdditionalRawData) + internal ManagedDisk(DiskEncryptionSetParameters diskEncryptionSet, StorageAccountType? 
storageAccountType, BatchVmDiskSecurityProfile securityProfile, IDictionary serializedAdditionalRawData) { + DiskEncryptionSet = diskEncryptionSet; StorageAccountType = storageAccountType; SecurityProfile = securityProfile; _serializedAdditionalRawData = serializedAdditionalRawData; } + /// Specifies the customer managed disk encryption set resource id for the managed disk. It can be set only in UserSubscription mode. + public DiskEncryptionSetParameters DiskEncryptionSet { get; set; } /// The storage account type for managed disk. public StorageAccountType? StorageAccountType { get; set; } /// Specifies the security profile settings for the managed disk. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Models/AzureComputeBatchContext.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/Models/AzureComputeBatchContext.cs index 47c05eaba7af..4aa3413c48b8 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/Models/AzureComputeBatchContext.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Models/AzureComputeBatchContext.cs @@ -24,9 +24,6 @@ namespace Azure.Compute.Batch [ModelReaderWriterBuildable(typeof(BatchApplication))] [ModelReaderWriterBuildable(typeof(BatchApplicationPackageReference))] [ModelReaderWriterBuildable(typeof(BatchAutoPoolSpecification))] - [ModelReaderWriterBuildable(typeof(BatchCertificate))] - [ModelReaderWriterBuildable(typeof(BatchCertificateDeleteError))] - [ModelReaderWriterBuildable(typeof(BatchCertificateReference))] [ModelReaderWriterBuildable(typeof(BatchContainerConfiguration))] [ModelReaderWriterBuildable(typeof(BatchCreateTaskCollectionResult))] [ModelReaderWriterBuildable(typeof(BatchDiffDiskSettings))] @@ -83,6 +80,7 @@ namespace Azure.Compute.Batch [ModelReaderWriterBuildable(typeof(BatchPoolEndpointConfiguration))] [ModelReaderWriterBuildable(typeof(BatchPoolEvaluateAutoScaleOptions))] [ModelReaderWriterBuildable(typeof(BatchPoolIdentity))] + [ModelReaderWriterBuildable(typeof(BatchPoolIdentityReference))] 
[ModelReaderWriterBuildable(typeof(BatchPoolInfo))] [ModelReaderWriterBuildable(typeof(BatchPoolNodeCounts))] [ModelReaderWriterBuildable(typeof(BatchPoolReplaceOptions))] @@ -123,15 +121,19 @@ namespace Azure.Compute.Batch [ModelReaderWriterBuildable(typeof(ContainerHostBatchBindMountEntry))] [ModelReaderWriterBuildable(typeof(ContainerRegistryReference))] [ModelReaderWriterBuildable(typeof(DataDisk))] + [ModelReaderWriterBuildable(typeof(DiskCustomerManagedKey))] [ModelReaderWriterBuildable(typeof(DiskEncryptionConfiguration))] + [ModelReaderWriterBuildable(typeof(DiskEncryptionSetParameters))] [ModelReaderWriterBuildable(typeof(EnvironmentSetting))] [ModelReaderWriterBuildable(typeof(ExitCodeMapping))] [ModelReaderWriterBuildable(typeof(ExitCodeRangeMapping))] [ModelReaderWriterBuildable(typeof(ExitConditions))] [ModelReaderWriterBuildable(typeof(ExitOptions))] [ModelReaderWriterBuildable(typeof(FileProperties))] + [ModelReaderWriterBuildable(typeof(HostEndpointSettings))] [ModelReaderWriterBuildable(typeof(InboundEndpoint))] [ModelReaderWriterBuildable(typeof(InstanceViewStatus))] + [ModelReaderWriterBuildable(typeof(IPTag))] [ModelReaderWriterBuildable(typeof(LinuxUserConfiguration))] [ModelReaderWriterBuildable(typeof(ManagedDisk))] [ModelReaderWriterBuildable(typeof(MountConfiguration))] @@ -145,6 +147,7 @@ namespace Azure.Compute.Batch [ModelReaderWriterBuildable(typeof(OutputFileDestination))] [ModelReaderWriterBuildable(typeof(OutputFileUploadConfig))] [ModelReaderWriterBuildable(typeof(OutputFileUploadHeader))] + [ModelReaderWriterBuildable(typeof(ProxyAgentSettings))] [ModelReaderWriterBuildable(typeof(RecentBatchJob))] [ModelReaderWriterBuildable(typeof(ResizeError))] [ModelReaderWriterBuildable(typeof(ResourceFile))] diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ProxyAgentSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ProxyAgentSettings.Serialization.cs new file mode 100644 index 000000000000..a737d953a7d0 --- 
/dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ProxyAgentSettings.Serialization.cs @@ -0,0 +1,179 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ProxyAgentSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ProxyAgentSettings)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(Enabled)) + { + writer.WritePropertyName("enabled"u8); + writer.WriteBooleanValue(Enabled.Value); + } + if (Optional.IsDefined(Imds)) + { + writer.WritePropertyName("imds"u8); + writer.WriteObjectValue(Imds, options); + } + if (Optional.IsDefined(WireServer)) + { + writer.WritePropertyName("wireServer"u8); + writer.WriteObjectValue(WireServer, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + ProxyAgentSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ProxyAgentSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeProxyAgentSettings(document.RootElement, options); + } + + internal static ProxyAgentSettings DeserializeProxyAgentSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool? 
enabled = default; + HostEndpointSettings imds = default; + HostEndpointSettings wireServer = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("enabled"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enabled = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("imds"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + imds = HostEndpointSettings.DeserializeHostEndpointSettings(property.Value, options); + continue; + } + if (property.NameEquals("wireServer"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + wireServer = HostEndpointSettings.DeserializeHostEndpointSettings(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ProxyAgentSettings(enabled, imds, wireServer, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); + default: + throw new FormatException($"The model {nameof(ProxyAgentSettings)} does not support writing '{options.Format}' format."); + } + } + + ProxyAgentSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeProxyAgentSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ProxyAgentSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ProxyAgentSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeProxyAgentSettings(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ProxyAgentSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ProxyAgentSettings.cs new file mode 100644 index 000000000000..e462ede75750 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ProxyAgentSettings.cs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies ProxyAgent settings while creating the virtual machine. + public partial class ProxyAgentSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ProxyAgentSettings() + { + } + + /// Initializes a new instance of . + /// Specifies whether Metadata Security Protocol feature should be enabled on the virtual machine or virtual machine scale set. Default is False. + /// Settings for the IMDS endpoint. + /// Settings for the WireServer endpoint. + /// Keeps track of any properties unknown to the library. + internal ProxyAgentSettings(bool? enabled, HostEndpointSettings imds, HostEndpointSettings wireServer, IDictionary serializedAdditionalRawData) + { + Enabled = enabled; + Imds = imds; + WireServer = wireServer; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Specifies whether Metadata Security Protocol feature should be enabled on the virtual machine or virtual machine scale set. Default is False. + public bool? Enabled { get; set; } + /// Settings for the IMDS endpoint. + public HostEndpointSettings Imds { get; set; } + /// Settings for the WireServer endpoint. 
+ public HostEndpointSettings WireServer { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs index 2ab799148d35..80e5c8bb5313 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs @@ -22,12 +22,15 @@ public SecurityEncryptionTypes(string value) _value = value ?? throw new ArgumentNullException(nameof(value)); } + private const string DiskWithVMGuestStateValue = "DiskWithVMGuestState"; private const string NonPersistedTPMValue = "NonPersistedTPM"; private const string VMGuestStateOnlyValue = "VMGuestStateOnly"; - /// NonPersistedTPM. + /// EncryptionType of the managed disk is set to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob. It is not supported in data disks. + public static SecurityEncryptionTypes DiskWithVMGuestState { get; } = new SecurityEncryptionTypes(DiskWithVMGuestStateValue); + /// EncryptionType of the managed disk is set to NonPersistedTPM for not persisting firmware state in the VMGuestState blob. public static SecurityEncryptionTypes NonPersistedTPM { get; } = new SecurityEncryptionTypes(NonPersistedTPMValue); - /// VMGuestStateOnly. + /// EncryptionType of the managed disk is set to VMGuestStateOnly for encryption of just the VMGuestState blob. public static SecurityEncryptionTypes VMGuestStateOnly { get; } = new SecurityEncryptionTypes(VMGuestStateOnlyValue); /// Determines if two values are the same. 
public static bool operator ==(SecurityEncryptionTypes left, SecurityEncryptionTypes right) => left.Equals(right); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs index 7f009917fe2e..bbda79902066 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs @@ -39,6 +39,11 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("encryptionAtHost"u8); writer.WriteBooleanValue(EncryptionAtHost.Value); } + if (Optional.IsDefined(ProxyAgentSettings)) + { + writer.WritePropertyName("proxyAgentSettings"u8); + writer.WriteObjectValue(ProxyAgentSettings, options); + } if (Optional.IsDefined(SecurityType)) { writer.WritePropertyName("securityType"u8); @@ -87,6 +92,7 @@ internal static SecurityProfile DeserializeSecurityProfile(JsonElement element, return null; } bool? encryptionAtHost = default; + ProxyAgentSettings proxyAgentSettings = default; SecurityTypes? 
securityType = default; BatchUefiSettings uefiSettings = default; IDictionary serializedAdditionalRawData = default; @@ -102,6 +108,15 @@ internal static SecurityProfile DeserializeSecurityProfile(JsonElement element, encryptionAtHost = property.Value.GetBoolean(); continue; } + if (property.NameEquals("proxyAgentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + proxyAgentSettings = ProxyAgentSettings.DeserializeProxyAgentSettings(property.Value, options); + continue; + } if (property.NameEquals("securityType"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -126,7 +141,7 @@ internal static SecurityProfile DeserializeSecurityProfile(JsonElement element, } } serializedAdditionalRawData = rawDataDictionary; - return new SecurityProfile(encryptionAtHost, securityType, uefiSettings, serializedAdditionalRawData); + return new SecurityProfile(encryptionAtHost, proxyAgentSettings, securityType, uefiSettings, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs index 7bc1f62730d6..d598d0a5d6a4 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs @@ -52,12 +52,14 @@ public SecurityProfile() /// Initializes a new instance of . /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. + /// Specifies ProxyAgent settings while creating the virtual machine. 
/// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. /// Keeps track of any properties unknown to the library. - internal SecurityProfile(bool? encryptionAtHost, SecurityTypes? securityType, BatchUefiSettings uefiSettings, IDictionary serializedAdditionalRawData) + internal SecurityProfile(bool? encryptionAtHost, ProxyAgentSettings proxyAgentSettings, SecurityTypes? securityType, BatchUefiSettings uefiSettings, IDictionary serializedAdditionalRawData) { EncryptionAtHost = encryptionAtHost; + ProxyAgentSettings = proxyAgentSettings; SecurityType = securityType; UefiSettings = uefiSettings; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -65,6 +67,8 @@ internal SecurityProfile(bool? encryptionAtHost, SecurityTypes? securityType, Ba /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. public bool? EncryptionAtHost { get; set; } + /// Specifies ProxyAgent settings while creating the virtual machine. + public ProxyAgentSettings ProxyAgentSettings { get; set; } /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. public SecurityTypes? SecurityType { get; set; } /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradeMode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradeMode.cs index dcd68b7587e5..23cf531f9811 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradeMode.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradeMode.cs @@ -26,7 +26,7 @@ public UpgradeMode(string value) private const string ManualValue = "manual"; private const string RollingValue = "rolling"; - /// TAll virtual machines in the scale set are automatically updated at the same time. + /// All virtual machines in the scale set are automatically updated at the same time. public static UpgradeMode Automatic { get; } = new UpgradeMode(AutomaticValue); /// You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action. public static UpgradeMode Manual { get; } = new UpgradeMode(ManualValue); diff --git a/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs index 21753c086293..9fa61e814aba 100644 --- a/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs +++ b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs @@ -26,7 +26,7 @@ public void Example_BatchClient_GetApplication_GetApplications() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetApplication("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); + Response response = client.GetApplication("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -42,7 +42,7 @@ public async Task Example_BatchClient_GetApplication_GetApplications_Async() TokenCredential 
credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetApplicationAsync("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); + Response response = await client.GetApplicationAsync("my_application_id", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -90,7 +90,7 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetworking { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -124,7 +124,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetw { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -154,7 +154,7 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetworking { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64"), TargetDedicatedNodes = 2, @@ -180,7 +180,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetw { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64"), TargetDedicatedNodes = 2, @@ -192,6 +192,688 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetw Response response = await client.CreatePoolAsync(pool); } + [Test] + [Ignore("Only validating compilation of examples")] + 
public void Example_BatchClient_CreatePool_CreatesAPoolWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "pool", + vmSize = "Standard_DC2as_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2019-datacenter-core-g2", + version = "latest", + }, + osDisk = new + { + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + securityProfile = new + { + securityEncryptionType = "DiskWithVMGuestState", + }, + }, + }, + dataDisks = new object[] + { +new +{ +lun = 0, +diskSizeGB = 1024, +managedDisk = new +{ +storageAccountType = "standard_lrs", +diskEncryptionSet = new +{ +id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", +}, +}, +} + }, + securityProfile = new + { + securityType = "confidentialVM", + uefiSettings = new + { + vTpmEnabled = true, + secureBootEnabled = true, + }, + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 1, + }); + Response response = client.CreatePool(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + 
id = "pool", + vmSize = "Standard_DC2as_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2019-datacenter-core-g2", + version = "latest", + }, + osDisk = new + { + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + securityProfile = new + { + securityEncryptionType = "DiskWithVMGuestState", + }, + }, + }, + dataDisks = new object[] + { +new +{ +lun = 0, +diskSizeGB = 1024, +managedDisk = new +{ +storageAccountType = "standard_lrs", +diskEncryptionSet = new +{ +id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", +}, +}, +} + }, + securityProfile = new + { + securityType = "confidentialVM", + uefiSettings = new + { + vTpmEnabled = true, + secureBootEnabled = true, + }, + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 1, + }); + Response response = await client.CreatePoolAsync(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreatePool_CreatesAPoolWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_DC2as_v5") + { + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) +{ 
+ManagedDisk = new ManagedDisk +{ +DiskEncryptionSet = new DiskEncryptionSetParameters +{ +Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), +}, +StorageAccountType = StorageAccountType.StandardLRS, +}, +}}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + SecurityProfile = new BatchVmDiskSecurityProfile + { + SecurityEncryptionType = SecurityEncryptionTypes.DiskWithVMGuestState, + }, + }, + }, + SecurityProfile = new SecurityProfile + { + SecurityType = SecurityTypes.ConfidentialVM, + UefiSettings = new BatchUefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }, + }, + }, + TargetDedicatedNodes = 1, + }; + Response response = client.CreatePool(pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_DC2as_v5") + { + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) +{ +ManagedDisk = new ManagedDisk +{ +DiskEncryptionSet = new DiskEncryptionSetParameters +{ +Id = new 
ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), +}, +StorageAccountType = StorageAccountType.StandardLRS, +}, +}}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + SecurityProfile = new BatchVmDiskSecurityProfile + { + SecurityEncryptionType = SecurityEncryptionTypes.DiskWithVMGuestState, + }, + }, + }, + SecurityProfile = new SecurityProfile + { + SecurityType = SecurityTypes.ConfidentialVM, + UefiSettings = new BatchUefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }, + }, + }, + TargetDedicatedNodes = 1, + }; + Response response = await client.CreatePoolAsync(pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreatePool_CreatesAPoolWithDiskEncryptionSetForUserSubscriptionAccounts() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "pool", + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2019-datacenter-core-g2", + version = "latest", + }, + osDisk = new + { + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, + }, + dataDisks = new object[] + { +new +{ +lun = 0, +diskSizeGB = 1024, +managedDisk = 
new +{ +storageAccountType = "standard_lrs", +diskEncryptionSet = new +{ +id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", +}, +}, +} + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 1, + }); + Response response = client.CreatePool(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithDiskEncryptionSetForUserSubscriptionAccounts_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "pool", + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "MicrosoftWindowsServer", + offer = "WindowsServer", + sku = "2019-datacenter-core-g2", + version = "latest", + }, + osDisk = new + { + managedDisk = new + { + storageAccountType = "standard_lrs", + diskEncryptionSet = new + { + id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", + }, + }, + }, + dataDisks = new object[] + { +new +{ +lun = 0, +diskSizeGB = 1024, +managedDisk = new +{ +storageAccountType = "standard_lrs", +diskEncryptionSet = new +{ +id = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId", +}, +}, +} + }, + nodeAgentSKUId = "batch.node.windows amd64", + }, + targetDedicatedNodes = 1, + }); + Response response = await client.CreatePoolAsync(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void 
Example_BatchClient_CreatePool_CreatesAPoolWithDiskEncryptionSetForUserSubscriptionAccounts_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_D2ds_v5") + { + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) +{ +ManagedDisk = new ManagedDisk +{ +DiskEncryptionSet = new DiskEncryptionSetParameters +{ +Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), +}, +StorageAccountType = StorageAccountType.StandardLRS, +}, +}}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }, + }, + TargetDedicatedNodes = 1, + }; + Response response = client.CreatePool(pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithDiskEncryptionSetForUserSubscriptionAccounts_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool", "Standard_D2ds_v5") + { + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = 
"MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-core-g2", + Version = "latest", + }, "batch.node.windows amd64") + { + DataDisks = {new DataDisk(0, 1024) +{ +ManagedDisk = new ManagedDisk +{ +DiskEncryptionSet = new DiskEncryptionSetParameters +{ +Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), +}, +StorageAccountType = StorageAccountType.StandardLRS, +}, +}}, + OsDisk = new BatchOsDisk + { + ManagedDisk = new ManagedDisk + { + DiskEncryptionSet = new DiskEncryptionSetParameters + { + Id = new ResourceIdentifier("/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/diskEncryptionSets/DiskEncryptionSetId"), + }, + StorageAccountType = StorageAccountType.StandardLRS, + }, + }, + }, + TargetDedicatedNodes = 1, + }; + Response response = await client.CreatePoolAsync(pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreatePool_CreatesAPoolWithDualStackNetworking() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "dualstackpool", + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", + }, + nodeAgentSKUId = "batch.node.ubuntu 20.04", + }, + networkConfiguration = new + { + publicIPAddressConfiguration = new + { + ipFamilies = new object[] + { +"IPv4", +"IPv6" + }, + }, + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +backendPort = 22, +frontendPortRangeStart = 40000, +frontendPortRangeEnd = 40500, +name = "sshpool", +protocol = "tcp", +networkSecurityGroupRules = new object[] +{ +new +{ +access = "allow", 
+priority = 1000, +sourceAddressPrefix = "*", +sourcePortRanges = new object[] +{ +"*" +}, +} +}, +} + }, + }, + }, + resizeTimeout = "PT15M", + targetDedicatedNodes = 1, + targetLowPriorityNodes = 0, + }); + Response response = client.CreatePool(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithDualStackNetworking_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "dualstackpool", + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", + }, + nodeAgentSKUId = "batch.node.ubuntu 20.04", + }, + networkConfiguration = new + { + publicIPAddressConfiguration = new + { + ipFamilies = new object[] + { +"IPv4", +"IPv6" + }, + }, + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +backendPort = 22, +frontendPortRangeStart = 40000, +frontendPortRangeEnd = 40500, +name = "sshpool", +protocol = "tcp", +networkSecurityGroupRules = new object[] +{ +new +{ +access = "allow", +priority = 1000, +sourceAddressPrefix = "*", +sourcePortRanges = new object[] +{ +"*" +}, +} +}, +} + }, + }, + }, + resizeTimeout = "PT15M", + targetDedicatedNodes = 1, + targetLowPriorityNodes = 0, + }); + Response response = await client.CreatePoolAsync(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreatePool_CreatesAPoolWithDualStackNetworking_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateOptions 
pool = new BatchPoolCreateOptions("dualstackpool", "Standard_D2ds_v5") + { + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "Canonical", + Offer = "ubuntu-24_04-lts", + Sku = "server", + }, "batch.node.ubuntu 20.04"), + ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), + TargetDedicatedNodes = 1, + TargetLowPriorityNodes = 0, + NetworkConfiguration = new NetworkConfiguration + { + EndpointConfiguration = new BatchPoolEndpointConfiguration(new BatchInboundNatPool[] + { +new BatchInboundNatPool("sshpool", InboundEndpointProtocol.Tcp, 22, 40000, 40500) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1000, NetworkSecurityGroupRuleAccess.Allow, "*") +{ +SourcePortRanges = {"*"}, +}}, +} + }), + PublicIpAddressConfiguration = new BatchPublicIpAddressConfiguration + { + IpFamilies = { IPFamily.IPv4, IPFamily.IPv6 }, + }, + }, + }; + Response response = client.CreatePool(pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithDualStackNetworking_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("dualstackpool", "Standard_D2ds_v5") + { + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference + { + Publisher = "Canonical", + Offer = "ubuntu-24_04-lts", + Sku = "server", + }, "batch.node.ubuntu 20.04"), + ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), + TargetDedicatedNodes = 1, + TargetLowPriorityNodes = 0, + NetworkConfiguration = new NetworkConfiguration + { + EndpointConfiguration = new BatchPoolEndpointConfiguration(new BatchInboundNatPool[] + { +new BatchInboundNatPool("sshpool", InboundEndpointProtocol.Tcp, 22, 40000, 40500) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1000, 
NetworkSecurityGroupRuleAccess.Allow, "*") +{ +SourcePortRanges = {"*"}, +}}, +} + }), + PublicIpAddressConfiguration = new BatchPublicIpAddressConfiguration + { + IpFamilies = { IPFamily.IPv4, IPFamily.IPv6 }, + }, + }, + }; + Response response = await client.CreatePoolAsync(pool); + } + [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified() @@ -203,14 +885,14 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified() using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "UbuntuServer", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", }, @@ -284,14 +966,14 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpeci using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "UbuntuServer", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", }, @@ -362,13 +1044,13 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified_C TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), 
ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -413,13 +1095,13 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpeci TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -467,14 +1149,14 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo using RequestContent content = RequestContent.Create(new { id = "mypool001", - vmSize = "standard_d2s_v3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, osDisk = new { @@ -513,14 +1195,14 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura using RequestContent content = RequestContent.Create(new { id = "mypool001", - vmSize = "standard_d2s_v3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, osDisk = new { @@ -556,13 +1238,13 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", 
"standard_d2s_v3") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { OsDisk = new BatchOsDisk @@ -596,13 +1278,13 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "standard_d2s_v3") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { OsDisk = new BatchOsDisk @@ -628,130 +1310,6 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura Response response = await client.CreatePoolAsync(pool); } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - id = "mypool001", - vmSize = "STANDARD_DC2s_V2", - virtualMachineConfiguration = new - { - imageReference = new - { - publisher = "Canonical", - offer = "UbuntuServer", - sku = "18_04-lts-gen2", - version = "latest", - }, - nodeAgentSKUId = "batch.node.ubuntu 18.04", - }, - targetDedicatedNodes = 1, - resourceTags = new - { - TagName1 = "TagValue1", - TagName2 = "TagValue2", - 
}, - }); - Response response = client.CreatePool(content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - id = "mypool001", - vmSize = "STANDARD_DC2s_V2", - virtualMachineConfiguration = new - { - imageReference = new - { - publisher = "Canonical", - offer = "UbuntuServer", - sku = "18_04-lts-gen2", - version = "latest", - }, - nodeAgentSKUId = "batch.node.ubuntu 18.04", - }, - targetDedicatedNodes = 1, - resourceTags = new - { - TagName1 = "TagValue1", - TagName2 = "TagValue2", - }, - }); - Response response = await client.CreatePoolAsync(content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") - { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference - { - Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "18_04-lts-gen2", - Version = "latest", - }, "batch.node.ubuntu 18.04"), - ResourceTags = -{ -["TagName1"] = "TagValue1", -["TagName2"] = "TagValue2" -}, - TargetDedicatedNodes = 1, - }; - Response response = client.CreatePool(pool); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags_Convenience_Async() - { - Uri endpoint = new Uri(""); - 
TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") - { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference - { - Publisher = "Canonical", - Offer = "UbuntuServer", - Sku = "18_04-lts-gen2", - Version = "latest", - }, "batch.node.ubuntu 18.04"), - ResourceTags = -{ -["TagName1"] = "TagValue1", -["TagName2"] = "TagValue2" -}, - TargetDedicatedNodes = 1, - }; - Response response = await client.CreatePoolAsync(pool); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile() @@ -769,7 +1327,7 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile() imageReference = new { publisher = "Canonical", - offer = "UbuntuServer", + offer = "ubuntu-24_04-lts", sku = "18_04-lts-gen2", version = "latest", }, @@ -808,7 +1366,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile imageReference = new { publisher = "Canonical", - offer = "UbuntuServer", + offer = "ubuntu-24_04-lts", sku = "18_04-lts-gen2", version = "latest", }, @@ -843,7 +1401,7 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile_Conve VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", + Offer = "ubuntu-24_04-lts", Sku = "18_04-lts-gen2", Version = "latest", }, "batch.node.ubuntu 18.04") @@ -876,7 +1434,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "UbuntuServer", + Offer = "ubuntu-24_04-lts", Sku = "18_04-lts-gen2", Version = "latest", }, "batch.node.ubuntu 18.04") @@ -907,14 +1465,14 @@ public void 
Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", }, @@ -953,14 +1511,14 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", }, @@ -996,13 +1554,13 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -1024,13 +1582,13 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new 
BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04"), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), TargetDedicatedNodes = 5, @@ -1055,13 +1613,13 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", + offer = "ubuntu-24_04-lts", sku = "120_04-lts", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", @@ -1100,13 +1658,13 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", + offer = "ubuntu-24_04-lts", sku = "120_04-lts", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", @@ -1142,12 +1700,12 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", + Offer = "ubuntu-24_04-lts", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { @@ -1174,12 +1732,12 @@ public async Task 
Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", + Offer = "ubuntu-24_04-lts", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { @@ -1209,14 +1767,14 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", extensions = new object[] @@ -1255,7 +1813,6 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo value = "myvalue", } }, - targetNodeCommunicationMode = "simplified", }); Response response = client.CreatePool(content); @@ -1273,14 +1830,14 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura using RequestContent content = RequestContent.Create(new { id = "pool2", - vmSize = "standard_a1", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "Canonical", - offer = "0001-com-ubuntu-server-focal", - sku = "20_04-lts", + offer = "ubuntu-24_04-lts", + sku = "server", }, nodeAgentSKUId = "batch.node.ubuntu 20.04", extensions = new object[] @@ -1319,7 +1876,6 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura value = "myvalue", } }, - targetNodeCommunicationMode = "simplified", }); 
Response response = await client.CreatePoolAsync(content); @@ -1334,13 +1890,13 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { Extensions = {new VMExtension("batchextension1", "Microsoft.Azure.KeyVault", "KeyVaultForLinux") @@ -1363,7 +1919,6 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = client.CreatePool(pool); } @@ -1376,13 +1931,13 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", - Offer = "0001-com-ubuntu-server-focal", - Sku = "20_04-lts", + Offer = "ubuntu-24_04-lts", + Sku = "server", }, "batch.node.ubuntu 20.04") { Extensions = {new VMExtension("batchextension1", "Microsoft.Azure.KeyVault", "KeyVaultForLinux") @@ -1405,7 +1960,6 @@ public async Task 
Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = await client.CreatePoolAsync(pool); } @@ -1421,14 +1975,14 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo using RequestContent content = RequestContent.Create(new { id = "mypool002", - vmSize = "Standard_A1_v2", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, windowsConfiguration = new @@ -1459,14 +2013,14 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura using RequestContent content = RequestContent.Create(new { id = "mypool002", - vmSize = "Standard_A1_v2", + vmSize = "Standard_D4d_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, windowsConfiguration = new @@ -1494,13 +2048,13 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, 
"batch.node.windows amd64") { @@ -1523,13 +2077,13 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_D4d_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -1602,7 +2156,7 @@ public void Example_BatchClient_GetPool_PoolGet() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1616,7 +2170,7 @@ public async Task Example_BatchClient_GetPool_PoolGet_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1644,6 +2198,156 @@ public async Task Example_BatchClient_GetPool_PoolGet_Convenience_Async() Response 
response = await client.GetPoolAsync("pool"); } + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetPool_PoolGetWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetPool_PoolGetWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetPool_PoolGetWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("pool"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetPool_PoolGetWithConfidentialDiskEncryptionSetForUserSubscriptionAccounts_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, 
credential); + + Response response = await client.GetPoolAsync("pool"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetPool_PoolGetWithCustomerManagedKey() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetPool_PoolGetWithCustomerManagedKey_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetPool_PoolGetWithCustomerManagedKey_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("pool"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetPool_PoolGetWithCustomerManagedKey_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetPoolAsync("pool"); + } + + [Test] + [Ignore("Only validating compilation 
of examples")] + public void Example_BatchClient_GetPool_PoolGetWithDiskEncryptionSetForUserSubscriptionAccounts() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetPool_PoolGetWithDiskEncryptionSetForUserSubscriptionAccounts_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetPool_PoolGetWithDiskEncryptionSetForUserSubscriptionAccounts_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("pool"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetPool_PoolGetWithDiskEncryptionSetForUserSubscriptionAccounts_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetPoolAsync("pool"); + } + [Test] [Ignore("Only validating compilation of examples")] public 
void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile() @@ -1702,7 +2406,7 @@ public void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithE TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = client.GetPool("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1716,7 +2420,7 @@ public async Task Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = await client.GetPoolAsync("pool", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -1884,6 +2588,94 @@ public async Task Example_BatchClient_UpdatePool_PatchThePool_Async() Console.WriteLine(response.Status); } + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_UpdatePool_PatchTheCustomerManagedKeyForAPool() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", + version = 
"latest", + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk", +"temporarydisk" + }, + customerManagedKey = new + { + keyUrl = "https:///keys//", + identityReference = new + { + resourceId = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1", + }, + }, + }, + nodeAgentSKUId = "batch.node.ubuntu 20.04", + }, + }); + Response response = client.UpdatePool("poolId", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_UpdatePool_PatchTheCustomerManagedKeyForAPool_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + vmSize = "Standard_D2ds_v5", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "Canonical", + offer = "ubuntu-24_04-lts", + sku = "server", + version = "latest", + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk", +"temporarydisk" + }, + customerManagedKey = new + { + keyUrl = "https:///keys//", + identityReference = new + { + resourceId = "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1", + }, + }, + }, + nodeAgentSKUId = "batch.node.ubuntu 20.04", + }, + }); + Response response = await client.UpdatePoolAsync("poolId", content); + + Console.WriteLine(response.Status); + } + [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_DisablePoolAutoScale_DisablePoolAutoscale() @@ -2052,7 +2844,6 @@ public void Example_BatchClient_ReplacePoolProperties_PoolUpdate() { commandLine = "/bin/bash -c 'echo start task'", }, - certificateReferences = Array.Empty(), applicationPackageReferences = 
Array.Empty(), metadata = Array.Empty(), }); @@ -2075,7 +2866,6 @@ public async Task Example_BatchClient_ReplacePoolProperties_PoolUpdate_Async() { commandLine = "/bin/bash -c 'echo start task'", }, - certificateReferences = Array.Empty(), applicationPackageReferences = Array.Empty(), metadata = Array.Empty(), }); @@ -2092,7 +2882,7 @@ public void Example_BatchClient_ReplacePoolProperties_PoolUpdate_Convenience() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) + BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -2107,7 +2897,7 @@ public async Task Example_BatchClient_ReplacePoolProperties_PoolUpdate_Convenien TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) + BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -2122,7 +2912,7 @@ public void Example_BatchClient_GetJob_JobGet() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetJob("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = client.GetJob("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -2136,7 +2926,7 @@ public async Task Example_BatchClient_GetJob_JobGet_Async() TokenCredential credential = new 
DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetJobAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = await client.GetJobAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -2462,14 +3252,14 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob() poolLifetimeOption = "job", pool = new { - vmSize = "STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -2522,20 +3312,6 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob() maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { -new -{ -thumbprint = "0123456789abcdef0123456789abcdef01234567", -thumbprintAlgorithm = "sha1", -storeLocation = "localmachine", -storeName = "Root", -visibility = new object[] -{ -"task" -}, -} - }, metadata = new object[] { new @@ -2544,7 +3320,6 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob() value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -2630,14 +3405,14 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Async() poolLifetimeOption = "job", pool = new { - vmSize = "STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ 
-2690,20 +3465,6 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Async() maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { -new -{ -thumbprint = "0123456789abcdef0123456789abcdef01234567", -thumbprintAlgorithm = "sha1", -storeLocation = "localmachine", -storeName = "Root", -visibility = new object[] -{ -"task" -}, -} - }, metadata = new object[] { new @@ -2712,7 +3473,6 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Async() value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -2743,13 +3503,13 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -2791,14 +3551,7 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() MaxTaskRetryCount = 2, WaitForSuccess = true, }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") -{ -StoreLocation = BatchCertificateStoreLocation.LocalMachine, -StoreName = "Root", -Visibility = {BatchCertificateVisibility.Task}, -}}, Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, }) @@ -2860,13 +3613,13 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_A AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", - Pool = new 
BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -2895,295 +3648,134 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_A }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") { -Value = "myvalue", -}}, - UserIdentity = new UserIdentity - { - AutoUser = new AutoUserSpecification - { - Scope = AutoUserScope.Task, - ElevationLevel = ElevationLevel.Admin, - }, - }, - MaxTaskRetryCount = 2, - WaitForSuccess = true, - }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") -{ -StoreLocation = BatchCertificateStoreLocation.LocalMachine, -StoreName = "Root", -Visibility = {BatchCertificateVisibility.Task}, -}}, - Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, - }, - }, - }) - { - Priority = 100, - Constraints = new BatchJobConstraints - { - MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H"), - MaxTaskRetryCount = -1, - }, - JobManagerTask = new BatchJobManagerTask("taskId", "myprogram.exe") - { - ResourceFiles = {new ResourceFile -{ -HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), -FilePath = "myprogram.exe", -}, new ResourceFile -{ -StorageContainerUri = new Uri("http://mystorage1.blob.core.windows.net/data?sas"), -FilePath = "datafolder", -}}, - EnvironmentSettings = {new EnvironmentSetting("myvariable") -{ -Value = "myvalue", -}}, - Constraints = new BatchTaskConstraints - { - MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H"), - RetentionTime = XmlConvert.ToTimeSpan("PT1H"), - MaxTaskRetryCount = 0, - }, - RequiredSlots = 2, - 
KillJobOnCompletion = false, - UserIdentity = new UserIdentity - { - AutoUser = new AutoUserSpecification - { - Scope = AutoUserScope.Task, - ElevationLevel = ElevationLevel.Admin, - }, - }, - RunExclusive = true, - }, - Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - }; - Response response = await client.CreateJobAsync(job); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.GetJobTaskCounts("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); - - JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new 
DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.GetJobTaskCountsAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); - - JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); - Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); - Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.GetJobTaskCounts("jobId"); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.GetJobTaskCountsAsync("jobId"); - } - - [Test] - 
[Ignore("Only validating compilation of examples")] - public void Example_BatchClient_CreateCertificate_CertificateCreate() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - thumbprintAlgorithm = "sha1", - thumbprint = "0123456789abcdef0123456789abcdef01234567", - data = "U3dhZ2dlciByb2Nrcw==", - certificateFormat = "pfx", - password = "", - }); - Response response = client.CreateCertificate(content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_CreateCertificate_CertificateCreate_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - thumbprintAlgorithm = "sha1", - thumbprint = "0123456789abcdef0123456789abcdef01234567", - data = "U3dhZ2dlciByb2Nrcw==", - certificateFormat = "pfx", - password = "", - }); - Response response = await client.CreateCertificateAsync(content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_CreateCertificate_CertificateCreate_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) - { - CertificateFormat = BatchCertificateFormat.Pfx, - Password = "", - }; - Response response = client.CreateCertificate(certificate); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task 
Example_BatchClient_CreateCertificate_CertificateCreate_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) +Value = "myvalue", +}}, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.Admin, + }, + }, + MaxTaskRetryCount = 2, + WaitForSuccess = true, + }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, + }, + }, + }) { - CertificateFormat = BatchCertificateFormat.Pfx, - Password = "", + Priority = 100, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H"), + MaxTaskRetryCount = -1, + }, + JobManagerTask = new BatchJobManagerTask("taskId", "myprogram.exe") + { + ResourceFiles = {new ResourceFile +{ +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), +FilePath = "myprogram.exe", +}, new ResourceFile +{ +StorageContainerUri = new Uri("http://mystorage1.blob.core.windows.net/data?sas"), +FilePath = "datafolder", +}}, + EnvironmentSettings = {new EnvironmentSetting("myvariable") +{ +Value = "myvalue", +}}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H"), + MaxTaskRetryCount = 0, + }, + RequiredSlots = 2, + KillJobOnCompletion = false, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.Admin, + }, + }, + RunExclusive = true, + }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; - Response response = await client.CreateCertificateAsync(certificate); - } - - [Test] - [Ignore("Only validating 
compilation of examples")] - public void Example_BatchClient_CancelCertificateDeletion_CertificateCancelDelete() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.CancelCertificateDeletion("sha1", "0123456789abcdef0123456789abcdef01234567"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_CancelCertificateDeletion_CertificateCancelDelete_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.CancelCertificateDeletionAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); - - Console.WriteLine(response.Status); + Response response = await client.CreateJobAsync(job); } [Test] [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_GetCertificate_CertificateGet() + public void Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + Response response = client.GetJobTaskCounts("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; - Console.WriteLine(result.GetProperty("thumbprint").ToString()); - Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); - Console.WriteLine(result.GetProperty("data").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); + 
Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); } [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_GetCertificate_CertificateGet_Async() + public async Task Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + Response response = await client.GetJobTaskCountsAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; - Console.WriteLine(result.GetProperty("thumbprint").ToString()); - Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); - Console.WriteLine(result.GetProperty("data").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); + 
Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); } [Test] [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_GetCertificate_CertificateGet_Convenience() + public void Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); + Response response = client.GetJobTaskCounts("jobId"); } [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_GetCertificate_CertificateGet_Convenience_Async() + public async Task Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); + Response response = await client.GetJobTaskCountsAsync("jobId"); } [Test] @@ -3194,7 +3786,7 @@ public void Example_BatchClient_GetJobSchedule_JobScheduleGet() TokenCredential credential = new DefaultAzureCredential(); BatchClient client 
= new BatchClient(endpoint, credential); - Response response = client.GetJobSchedule("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = client.GetJobSchedule("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); @@ -3208,7 +3800,7 @@ public async Task Example_BatchClient_GetJobSchedule_JobScheduleGet_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetJobScheduleAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = await client.GetJobScheduleAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); @@ -3589,8 +4181,8 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd( id = "jobScheduleId", schedule = new { - doNotRunUntil = "2014-09-10T02:30:00.000Z", - doNotRunAfter = "2014-09-10T06:30:00.000Z", + doNotRunUntil = "2025-09-10T02:30:00.000Z", + doNotRunAfter = "2025-09-10T06:30:00.000Z", startWindow = "PT1M", recurrenceInterval = "PT5M", }, @@ -3653,14 +4245,14 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd( poolLifetimeOption = "jobschedule", pool = new { - vmSize = "STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = 
"2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -3713,20 +4305,6 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd( maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { -new -{ -thumbprint = "0123456789abcdef0123456789abcdef01234567", -thumbprintAlgorithm = "sha1", -storeLocation = "localmachine", -storeName = "Root", -visibility = new object[] -{ -"task" -}, -} - }, metadata = new object[] { new @@ -3735,7 +4313,6 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd( value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -3767,8 +4344,8 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu id = "jobScheduleId", schedule = new { - doNotRunUntil = "2014-09-10T02:30:00.000Z", - doNotRunAfter = "2014-09-10T06:30:00.000Z", + doNotRunUntil = "2025-09-10T02:30:00.000Z", + doNotRunAfter = "2025-09-10T06:30:00.000Z", startWindow = "PT1M", recurrenceInterval = "PT5M", }, @@ -3831,14 +4408,14 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu poolLifetimeOption = "jobschedule", pool = new { - vmSize = "STANDARD_D2S_V3", + vmSize = "Standard_D2ds_v5", virtualMachineConfiguration = new { imageReference = new { publisher = "MicrosoftWindowsServer", offer = "WindowsServer", - sku = "2016-datacenter-smalldisk", + sku = "2025-datacenter-smalldisk", version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", @@ -3891,20 +4468,6 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu maxTaskRetryCount = 2, waitForSuccess = true, }, - certificateReferences = new object[] - { -new -{ -thumbprint = "0123456789abcdef0123456789abcdef01234567", -thumbprintAlgorithm = "sha1", -storeLocation = "localmachine", -storeName = "Root", -visibility = new object[] -{ -"task" -}, -} - }, metadata = new object[] { new @@ -3913,7 
+4476,6 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu value = "myvalue", } }, - targetNodeCommunicationMode = "default", }, }, }, @@ -3942,8 +4504,8 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { - DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), - DoNotRunAfter = DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), + DoNotRunUntil = DateTimeOffset.Parse("2025-09-10T02:30:00.000Z"), + DoNotRunAfter = DateTimeOffset.Parse("2025-09-10T06:30:00.000Z"), StartWindow = XmlConvert.ToTimeSpan("PT1M"), RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -3951,13 +4513,13 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -3999,14 +4561,7 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ MaxTaskRetryCount = 2, WaitForSuccess = true, }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") -{ -StoreLocation = BatchCertificateStoreLocation.LocalMachine, -StoreName = "Root", -Visibility = {BatchCertificateVisibility.Task}, -}}, Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, }) @@ -4067,8 +4622,8 
@@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { - DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), - DoNotRunAfter = DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), + DoNotRunUntil = DateTimeOffset.Parse("2025-09-10T02:30:00.000Z"), + DoNotRunAfter = DateTimeOffset.Parse("2025-09-10T06:30:00.000Z"), StartWindow = XmlConvert.ToTimeSpan("PT1M"), RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -4076,13 +4631,13 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("STANDARD_D2S_V3") + Pool = new BatchPoolSpecification("Standard_D2ds_v5") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", - Sku = "2016-datacenter-smalldisk", + Sku = "2025-datacenter-smalldisk", Version = "latest", }, "batch.node.windows amd64") { @@ -4124,14 +4679,7 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu MaxTaskRetryCount = 2, WaitForSuccess = true, }, - CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") -{ -StoreLocation = BatchCertificateStoreLocation.LocalMachine, -StoreName = "Root", -Visibility = {BatchCertificateVisibility.Task}, -}}, Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, - TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, }) @@ -5224,7 +5772,7 @@ public void Example_BatchClient_GetTask_TaskGet() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response 
response = client.GetTask("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = client.GetTask("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5238,7 +5786,7 @@ public async Task Example_BatchClient_GetTask_TaskGet_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetTaskAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null); + Response response = await client.GetTaskAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5436,7 +5984,7 @@ public void Example_BatchClient_GetTaskFile_GetFileFromTask() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetTaskFile("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null); + Response response = client.GetTaskFile("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5450,7 +5998,7 @@ public async Task Example_BatchClient_GetTaskFile_GetFileFromTask_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetTaskFileAsync("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 
00:00:00 GMT"), null, null, null); + Response response = await client.GetTaskFileAsync("jobId", "task1", "wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5490,7 +6038,7 @@ public void Example_BatchClient_CreateNodeUser_NodeCreateUser() { name = "userName", isAdmin = false, - expiryTime = "2017-08-01T00:00:00Z", + expiryTime = "2025-08-01T00:00:00Z", password = "Password", }); Response response = client.CreateNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5510,7 +6058,7 @@ public async Task Example_BatchClient_CreateNodeUser_NodeCreateUser_Async() { name = "userName", isAdmin = false, - expiryTime = "2017-08-01T00:00:00Z", + expiryTime = "2025-08-01T00:00:00Z", password = "Password", }); Response response = await client.CreateNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5529,7 +6077,7 @@ public void Example_BatchClient_CreateNodeUser_NodeCreateUser_Convenience() BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, - ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), + ExpiryTime = DateTimeOffset.Parse("2025-08-01T00:00:00Z"), Password = "Password", }; Response response = client.CreateNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", user); @@ -5546,7 +6094,7 @@ public async Task Example_BatchClient_CreateNodeUser_NodeCreateUser_Convenience_ BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, - ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), + ExpiryTime = DateTimeOffset.Parse("2025-08-01T00:00:00Z"), Password = "Password", }; Response response = await client.CreateNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", user); @@ -5589,7 +6137,7 @@ public void Example_BatchClient_ReplaceNodeUser_NodeUpdateUser() using RequestContent 
content = RequestContent.Create(new { password = "12345", - expiryTime = "2016-11-27T00:45:48.7320857Z", + expiryTime = "2025-11-27T00:45:48.7320857Z", }); Response response = client.ReplaceNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); @@ -5607,7 +6155,7 @@ public async Task Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Async() using RequestContent content = RequestContent.Create(new { password = "12345", - expiryTime = "2016-11-27T00:45:48.7320857Z", + expiryTime = "2025-11-27T00:45:48.7320857Z", }); Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); @@ -5625,7 +6173,7 @@ public void Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Convenience() BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", - ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), + ExpiryTime = DateTimeOffset.Parse("2025-11-27T00:45:48.7320857Z"), }; Response response = client.ReplaceNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", "userName", updateOptions); } @@ -5641,7 +6189,7 @@ public async Task Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Convenience BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", - ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), + ExpiryTime = DateTimeOffset.Parse("2025-11-27T00:45:48.7320857Z"), }; Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", updateOptions); } @@ -5654,7 +6202,7 @@ public void Example_BatchClient_GetNode_NodeGet() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetNode("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + Response response = client.GetNode("poolId", 
"tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5668,7 +6216,7 @@ public async Task Example_BatchClient_GetNode_NodeGet_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + Response response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5696,6 +6244,56 @@ public async Task Example_BatchClient_GetNode_NodeGet_Convenience_Async() Response response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z"); } + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetNode_NodeEnableScheduling() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNode("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:55:44 GMT"), null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetNode_NodeEnableScheduling_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + 
Response response = await client.GetNodeAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:55:44 GMT"), null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetNode_NodeEnableScheduling_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNode("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetNode_NodeEnableScheduling_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); + } + [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_DisableNodeScheduling_NodeDisableScheduling() @@ -5780,7 +6378,7 @@ public void Example_BatchClient_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSet TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetNodeRemoteLoginSettings("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); + Response response = client.GetNodeRemoteLoginSettings("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = 
JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); @@ -5795,7 +6393,7 @@ public async Task Example_BatchClient_GetNodeRemoteLoginSettings_NodeGetRemoteLo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetNodeRemoteLoginSettingsAsync("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null); + Response response = await client.GetNodeRemoteLoginSettingsAsync("poolId", "tvm-1695681911_1-20161121t182739z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); @@ -5824,6 +6422,58 @@ public async Task Example_BatchClient_GetNodeRemoteLoginSettings_NodeGetRemoteLo Response response = await client.GetNodeRemoteLoginSettingsAsync("poolId", "tvm-1695681911_1-20161121t182739z"); } + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetNodeRemoteLoginSettings_GetBatchNodeDualStackNetworkingRemote() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeRemoteLoginSettings("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:52:43 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); + Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task 
Example_BatchClient_GetNodeRemoteLoginSettings_GetBatchNodeDualStackNetworkingRemote_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeRemoteLoginSettingsAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d", null, DateTimeOffset.Parse("Fri, 27 Jun 2025 08:52:43 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); + Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetNodeRemoteLoginSettings_GetBatchNodeDualStackNetworkingRemote_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeRemoteLoginSettings("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetNodeRemoteLoginSettings_GetBatchNodeDualStackNetworkingRemote_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeRemoteLoginSettingsAsync("dualstackpool", "tvmps_5d8adec89961dcc011329b38df999a841f6cc815a5710678b741f04b33556ed2_d"); + } + [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs() @@ -5834,8 +6484,8 @@ public void Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs() using RequestContent content = 
RequestContent.Create(new { - containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", - startTime = "2017-11-27T00:00:00Z", + containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig", + startTime = "2025-11-27T00:00:00Z", }); Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5854,8 +6504,8 @@ public async Task Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Asyn using RequestContent content = RequestContent.Create(new { - containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", - startTime = "2017-11-27T00:00:00Z", + containerUrl = "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig", + startTime = "2025-11-27T00:00:00Z", }); Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", content); @@ -5872,7 +6522,7 @@ public void Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Convenienc TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); + UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig"), DateTimeOffset.Parse("2025-11-27T00:00:00Z")); Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", 
uploadOptions); } @@ -5884,7 +6534,7 @@ public async Task Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Conv TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); + UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2025-12-09T18%3A51%3A00Z&sp=w&sv=2025-05-31&sr=c&sig"), DateTimeOffset.Parse("2025-11-27T00:00:00Z")); Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", uploadOptions); } @@ -5896,7 +6546,7 @@ public void Example_BatchClient_GetNodeExtension_GetBatchNodeExtension() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetNodeExtension("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + Response response = client.GetNodeExtension("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5910,7 +6560,7 @@ public async Task Example_BatchClient_GetNodeExtension_GetBatchNodeExtension_Asy TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetNodeExtensionAsync("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 
00:00:00 GMT"), null, null); + Response response = await client.GetNodeExtensionAsync("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5972,7 +6622,7 @@ public void Example_BatchClient_GetNodeFile_GetFileFromComputeNode() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = client.GetNodeFile("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null); + Response response = client.GetNodeFile("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -5986,7 +6636,7 @@ public async Task Example_BatchClient_GetNodeFile_GetFileFromComputeNode_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - Response response = await client.GetNodeFileAsync("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null); + Response response = await client.GetNodeFileAsync("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null); JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; Console.WriteLine(result.ToString()); @@ -6022,7 +6672,7 @@ public void Example_BatchClient_GetApplications_ListApplications() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, 
credential); - foreach (BinaryData item in client.GetApplications(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) + foreach (BinaryData item in client.GetApplications(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -6039,7 +6689,7 @@ public async Task Example_BatchClient_GetApplications_ListApplications_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetApplicationsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) + await foreach (BinaryData item in client.GetApplicationsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("id").ToString()); @@ -6082,7 +6732,7 @@ public void Example_BatchClient_GetPoolUsageMetrics_PoolListUsageMetrics() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetPoolUsageMetrics(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + foreach (BinaryData item in client.GetPoolUsageMetrics(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -6101,7 +6751,7 @@ public async Task Example_BatchClient_GetPoolUsageMetrics_PoolListUsageMetrics_A TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 
00:00:00 GMT"), null, null, null, null, null)) + await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -6146,7 +6796,7 @@ public void Example_BatchClient_GetPools_PoolList() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetPools(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + foreach (BinaryData item in client.GetPools(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6161,7 +6811,7 @@ public async Task Example_BatchClient_GetPools_PoolList_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetPoolsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + await foreach (BinaryData item in client.GetPoolsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6202,7 +6852,7 @@ public void Example_BatchClient_GetSupportedImages_AccountListNodeAgentSkus() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetSupportedImages(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) + foreach (BinaryData item in client.GetSupportedImages(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 
GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); @@ -6220,7 +6870,7 @@ public async Task Example_BatchClient_GetSupportedImages_AccountListNodeAgentSku TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetSupportedImagesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) + await foreach (BinaryData item in client.GetSupportedImagesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); @@ -6264,7 +6914,7 @@ public void Example_BatchClient_GetPoolNodeCounts_NodeCountsPayload() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetPoolNodeCounts(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) + foreach (BinaryData item in client.GetPoolNodeCounts(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -6279,7 +6929,7 @@ public async Task Example_BatchClient_GetPoolNodeCounts_NodeCountsPayload_Async( TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetPoolNodeCountsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) + await foreach (BinaryData item in client.GetPoolNodeCountsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = 
JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolId").ToString()); @@ -6320,7 +6970,7 @@ public void Example_BatchClient_GetJobs_JobList() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetJobs(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + foreach (BinaryData item in client.GetJobs(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6335,7 +6985,7 @@ public async Task Example_BatchClient_GetJobs_JobList_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetJobsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + await foreach (BinaryData item in client.GetJobsAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6376,7 +7026,7 @@ public void Example_BatchClient_GetJobsFromSchedules_ListJobUnderJobSchedule() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetJobsFromSchedules("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + foreach (BinaryData item in client.GetJobsFromSchedules("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; 
Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6391,7 +7041,7 @@ public async Task Example_BatchClient_GetJobsFromSchedules_ListJobUnderJobSchedu TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetJobsFromSchedulesAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + await foreach (BinaryData item in client.GetJobsFromSchedulesAsync("jobScheduleId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("poolInfo").ToString()); @@ -6432,7 +7082,7 @@ public void Example_BatchClient_GetJobPreparationAndReleaseTaskStatuses_JobListP TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatuses("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) + foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatuses("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6447,7 +7097,7 @@ public async Task Example_BatchClient_GetJobPreparationAndReleaseTaskStatuses_Jo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatusesAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) + await foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatusesAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 
00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6480,66 +7130,6 @@ public async Task Example_BatchClient_GetJobPreparationAndReleaseTaskStatuses_Jo } } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_GetCertificates_CertificateList() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - foreach (BinaryData item in client.GetCertificates(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) - { - JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; - Console.WriteLine(result.GetProperty("thumbprint").ToString()); - Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); - Console.WriteLine(result.GetProperty("data").ToString()); - } - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_GetCertificates_CertificateList_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - await foreach (BinaryData item in client.GetCertificatesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) - { - JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; - Console.WriteLine(result.GetProperty("thumbprint").ToString()); - Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); - Console.WriteLine(result.GetProperty("data").ToString()); - } - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_GetCertificates_CertificateList_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new 
BatchClient(endpoint, credential); - - foreach (BatchCertificate item in client.GetCertificates()) - { - } - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_GetCertificates_CertificateList_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - await foreach (BatchCertificate item in client.GetCertificatesAsync()) - { - } - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_GetJobSchedules_JobScheduleList() @@ -6548,7 +7138,7 @@ public void Example_BatchClient_GetJobSchedules_JobScheduleList() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetJobSchedules(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + foreach (BinaryData item in client.GetJobSchedules(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); @@ -6563,7 +7153,7 @@ public async Task Example_BatchClient_GetJobSchedules_JobScheduleList_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetJobSchedulesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + await foreach (BinaryData item in client.GetJobSchedulesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); 
@@ -6604,7 +7194,7 @@ public void Example_BatchClient_GetTasks_TaskList() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetTasks("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + foreach (BinaryData item in client.GetTasks("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6619,7 +7209,7 @@ public async Task Example_BatchClient_GetTasks_TaskList_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetTasksAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null, null)) + await foreach (BinaryData item in client.GetTasksAsync("jobId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6660,7 +7250,7 @@ public void Example_BatchClient_GetSubTasks_TaskListSubtasks() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetSubTasks("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) + foreach (BinaryData item in client.GetSubTasks("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6675,7 +7265,7 @@ public async Task Example_BatchClient_GetSubTasks_TaskListSubtasks_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient 
client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetSubTasksAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null)) + await foreach (BinaryData item in client.GetSubTasksAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6716,7 +7306,7 @@ public void Example_BatchClient_GetTaskFiles_FileListFromTask() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetTaskFiles("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) + foreach (BinaryData item in client.GetTaskFiles("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6731,7 +7321,7 @@ public async Task Example_BatchClient_GetTaskFiles_FileListFromTask_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetTaskFilesAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) + await foreach (BinaryData item in client.GetTaskFilesAsync("jobId", "taskId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6772,7 +7362,7 @@ public void Example_BatchClient_GetNodes_NodeList() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in 
client.GetNodes("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) + foreach (BinaryData item in client.GetNodes("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6787,7 +7377,7 @@ public async Task Example_BatchClient_GetNodes_NodeList_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetNodesAsync("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) + await foreach (BinaryData item in client.GetNodesAsync("poolId", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6828,7 +7418,7 @@ public void Example_BatchClient_GetNodeExtensions_ListComputeNodeExtensions() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetNodeExtensions("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) + foreach (BinaryData item in client.GetNodeExtensions("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6843,7 +7433,7 @@ public async Task Example_BatchClient_GetNodeExtensions_ListComputeNodeExtension TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetNodeExtensionsAsync("poolId", 
"tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null)) + await foreach (BinaryData item in client.GetNodeExtensionsAsync("poolId", "tvm-1695681911_2-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6884,7 +7474,7 @@ public void Example_BatchClient_GetNodeFiles_FileListFromNode() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - foreach (BinaryData item in client.GetNodeFiles("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) + foreach (BinaryData item in client.GetNodeFiles("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); @@ -6899,7 +7489,7 @@ public async Task Example_BatchClient_GetNodeFiles_FileListFromNode_Async() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - await foreach (BinaryData item in client.GetNodeFilesAsync("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, false, null)) + await foreach (BinaryData item in client.GetNodeFilesAsync("poolId", "tvm-1695681911_1-20161122t193202z", null, DateTimeOffset.Parse("Fri, 17 Feb 2025 00:00:00 GMT"), null, null, false, null)) { JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchCertificatesIntegrationTests.cs 
b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchCertificatesIntegrationTests.cs deleted file mode 100644 index 1c42180098df..000000000000 --- a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchCertificatesIntegrationTests.cs +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Collections.Generic; -using System.Drawing; -using System.Globalization; -using System.IO; -using System.Linq; -using System.Security.Cryptography.X509Certificates; -using System.Text; -using System.Threading.Tasks; -using Azure.Compute.Batch.Tests.Common; -using Azure.Compute.Batch.Tests.Infrastructure; -using Azure.Core.TestFramework; -using NUnit.Framework; -using Org.BouncyCastle.Asn1.Cmp; -using Org.BouncyCastle.Pkcs; - -namespace Azure.Compute.Batch.Tests.Integration -{ - public class BatchCertificatesIntegrationTests : BatchLiveTestBase - { - /// - /// Initializes a new instance of the class. - /// - /// A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods. - public BatchCertificatesIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode) - { - } - - /// - /// Initializes a new instance of the class. - /// - /// A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods. 
- public BatchCertificatesIntegrationTests(bool isAsync) : base(isAsync) - { - } - - [LiveOnly] - public async Task CreateCertificate() - { - var client = CreateBatchClient(); - - const string certificatePrefix = "testcertificatecrud"; - string cerFilePath = CertificateBuilder.GetTemporaryCertificateFilePath(string.Format("{0}.cer", certificatePrefix)); - string pfxFilePath = CertificateBuilder.GetTemporaryCertificateFilePath(string.Format("{0}.pfx", certificatePrefix)); - List certificates = null; - - try - { - certificates = await GenerateCertificatesAsync(client, cerFilePath, pfxFilePath); - Assert.IsNotNull(certificates); - Assert.AreEqual(1, certificates.Count()); - } - finally - { - // Delete the certificate - // Delete the certificate files - if (File.Exists(cerFilePath)) - { - File.Delete(cerFilePath); - if (certificates != null && certificates.Count > 0) - await client.DeleteCertificateAsync(certificates[0].ThumbprintAlgorithm, certificates[0].Thumbprint); - } - if (File.Exists(pfxFilePath)) - { - File.Delete(pfxFilePath); - if (certificates != null && certificates.Count > 1) - await client.DeleteCertificateAsync(certificates[1].ThumbprintAlgorithm, certificates[1].Thumbprint); - } - } - } - - [LiveOnly] - public async Task DeleteCertificate() - { - var client = CreateBatchClient(); - - const string certificatePrefix = "testcertificatecrud"; - string cerFilePath = CertificateBuilder.GetTemporaryCertificateFilePath(string.Format("{0}.cer", certificatePrefix)); - string cerFilePath2 = CertificateBuilder.GetTemporaryCertificateFilePath(string.Format("{0}.cer", certificatePrefix+"2")); - int count = 0; - try - { - await GenerateCertificatesAsync(client, cerFilePath, ""); - await GenerateCertificatesAsync(client, cerFilePath2, "",2); - - await foreach (BatchCertificate item in client.GetCertificatesAsync()) - { - // delete - await client.DeleteCertificateAsync(item.ThumbprintAlgorithm, item.Thumbprint); - count++; - } - } - finally - { - Assert.AreEqual(2, 
count); - // Delete the certificate files - if (File.Exists(cerFilePath)) - { - File.Delete(cerFilePath); - } - if (File.Exists(cerFilePath2)) - { - File.Delete(cerFilePath2); - } - } - } - - [LiveOnly] - public async Task PoolCreateAndUpdateWithCertificates() - { - const string certificatePrefix = "testcertificatecrud"; - var client = CreateBatchClient(); - List certificates = null; - string cerFilePath = CertificateBuilder.GetTemporaryCertificateFilePath(string.Format("{0}.cer", certificatePrefix)); - - WindowsPoolFixture iaasWindowsPoolFixture = new WindowsPoolFixture(client, "CertPool", IsPlayBack()); - var poolID = iaasWindowsPoolFixture.PoolId; - - try - { - certificates = await GenerateCertificatesAsync(client, cerFilePath, ""); - Assert.IsNotNull(certificates); - Assert.AreEqual(1, certificates.Count()); - - // create a pool to verify we have something to query for - BatchPoolCreateOptions batchPoolCreateOptions = iaasWindowsPoolFixture.CreatePoolOptions(); - batchPoolCreateOptions.CertificateReferences.Add( - new BatchCertificateReference(certificates[0].Thumbprint, certificates[0].ThumbprintAlgorithm) - ); - batchPoolCreateOptions.CertificateReferences[0].Visibility.Add(BatchCertificateVisibility.RemoteUser); - Response response = await client.CreatePoolAsync(batchPoolCreateOptions); - BatchPool certPool = await iaasWindowsPoolFixture.WaitForPoolAllocation(client, iaasWindowsPoolFixture.PoolId); - - // verify autoscale settings - Assert.IsNotNull(certPool); - Assert.AreEqual(1, certPool.CertificateReferences.Count); - Assert.AreEqual(certificates[0].Thumbprint, certPool.CertificateReferences[0].Thumbprint); - Assert.AreEqual(certificates[0].ThumbprintAlgorithm, certPool.CertificateReferences[0].ThumbprintAlgorithm); - } - finally - { - //await client.DeletePoolAsync(poolID); - if (File.Exists(cerFilePath)) - { - File.Delete(cerFilePath); - if (certificates != null && certificates.Count > 0) - await 
client.DeleteCertificateAsync(certificates[0].ThumbprintAlgorithm, certificates[0].Thumbprint); - } - - await client.DeletePoolAsync(poolID); - } - } - - [LiveOnly] - public async Task ReplaceCertPool() - { - const string certificatePrefix = "testcertificatecrud"; - var client = CreateBatchClient(); - List certificates = null; - string cerFilePath = CertificateBuilder.GetTemporaryCertificateFilePath(string.Format("{0}.cer", certificatePrefix)); - - WindowsPoolFixture iaasWindowsPoolFixture = new WindowsPoolFixture(client, "ReplaceCertPool", IsPlayBack()); - var poolID = iaasWindowsPoolFixture.PoolId; - - try - { - certificates = await GenerateCertificatesAsync(client, cerFilePath, ""); - Assert.IsNotNull(certificates); - Assert.AreEqual(1, certificates.Count()); - - // create a pool to verify we have something to query for - BatchPool orginalPool = await iaasWindowsPoolFixture.CreatePoolAsync(0); - - // replace pool - BatchApplicationPackageReference[] batchApplicationPackageReferences = new BatchApplicationPackageReference[] { - }; - - BatchMetadataItem[] metadataIems = new BatchMetadataItem[] { - new BatchMetadataItem("name", "value") - }; - - BatchCertificateReference[] certificateReferences = new BatchCertificateReference[] { - new BatchCertificateReference(certificates[0].Thumbprint, certificates[0].ThumbprintAlgorithm) - }; - - BatchPoolReplaceOptions replaceContent = new BatchPoolReplaceOptions(certificateReferences, batchApplicationPackageReferences, metadataIems); - Response response = await client.ReplacePoolPropertiesAsync(poolID, replaceContent); - BatchPool replacePool = await client.GetPoolAsync(poolID); - Assert.AreEqual(replacePool.Metadata.First().Value, "value"); - Assert.AreEqual(1, replacePool.CertificateReferences.Count); - Assert.AreEqual(certificates[0].Thumbprint, replacePool.CertificateReferences[0].Thumbprint); - Assert.AreEqual(certificates[0].ThumbprintAlgorithm, replacePool.CertificateReferences[0].ThumbprintAlgorithm); - } - finally - 
{ - //await client.DeletePoolAsync(poolID); - if (File.Exists(cerFilePath)) - { - File.Delete(cerFilePath); - if (certificates != null && certificates.Count > 0) - await client.DeleteCertificateAsync(certificates[0].ThumbprintAlgorithm, certificates[0].Thumbprint); - } - - await client.DeletePoolAsync(poolID); - } - } - - private async Task> GenerateCertificatesAsync(BatchClient batchClient, string cerFilePath, string pfxFilePath, long seed=1) - { - X509Certificate2 cerCert = CertificateBuilder.CreateSelfSignedInFile2("Foo", cerFilePath, CertificateBuilder.Sha1Algorithm,seed:seed); - BatchCertificate cerCertificate = new BatchCertificate(cerCert.Thumbprint, "sha1", BinaryData.FromBytes(cerCert.GetRawCertData())) - { - CertificateFormat = BatchCertificateFormat.Cer, - Password = "", - }; - - Response response = await batchClient.CreateCertificateAsync(cerCertificate); - - BatchCertificate cerCertificateResponse = await batchClient.GetCertificateAsync(cerCertificate.ThumbprintAlgorithm, cerCertificate.Thumbprint); - - return new List - { - cerCertificateResponse//, - // pfxCertificateResponse - }; - } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchNodeIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchNodeIntegrationTests.cs index 9b1797b7a113..75441f72ca3a 100644 --- a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchNodeIntegrationTests.cs +++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchNodeIntegrationTests.cs @@ -281,7 +281,11 @@ public async Task GetRemoteLoginSettings() batchPoolCreateOptions.NetworkConfiguration = new NetworkConfiguration() { - EndpointConfiguration = batchPoolEndpointConfiguration + EndpointConfiguration = batchPoolEndpointConfiguration, + PublicIpAddressConfiguration = new BatchPublicIpAddressConfiguration + { + IpFamilies = { IPFamily.IPv4, IPFamily.IPv6 }, + } }; // create a pool to verify we have something to query for @@ -298,6 +302,8 @@ public async Task 
GetRemoteLoginSettings() BatchNodeRemoteLoginSettings batchNodeRemoteLoginSettings = await client.GetNodeRemoteLoginSettingsAsync(poolID, batchNodeID); Assert.NotNull(batchNodeRemoteLoginSettings); Assert.NotNull(batchNodeRemoteLoginSettings.RemoteLoginIpAddress); + Assert.NotNull(batchNodeRemoteLoginSettings.Ipv6RemoteLoginPort); + Assert.NotNull(batchNodeRemoteLoginSettings.Ipv6RemoteLoginIpAddress); } finally { @@ -325,7 +331,7 @@ public async Task Scheduling() Assert.IsNotEmpty(batchNodeID); BatchNodeDisableSchedulingOptions batchNodeDisableSchedulingContent = new BatchNodeDisableSchedulingOptions() { - NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.TaskCompletion, + NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.Terminate, }; Response response = await client.DisableNodeSchedulingAsync(poolID, batchNodeID, batchNodeDisableSchedulingContent); Assert.AreEqual(200, response.Status); diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchPoolIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchPoolIntegrationTests.cs index 4cabce80a20b..b006f605da25 100644 --- a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchPoolIntegrationTests.cs +++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchPoolIntegrationTests.cs @@ -212,7 +212,19 @@ public async Task PoolCreatedOsDiskSecurityProfile() { SecureBootEnabled = true, VTpmEnabled = true, - } + }, + ProxyAgentSettings = new ProxyAgentSettings + { + Imds = new HostEndpointSettings + { + Mode = HostEndpointSettingsModeTypes.Audit, + }, + Enabled = false, + //WireServer = new HostEndpointSettings + //{ + // InVmAccessControlProfileReferenceId = "id2", + //}, + }, }, OsDisk = new BatchOsDisk() { @@ -241,6 +253,8 @@ public async Task PoolCreatedOsDiskSecurityProfile() Assert.AreEqual(pool.VirtualMachineConfiguration.SecurityProfile.EncryptionAtHost, false); Assert.AreEqual(pool.VirtualMachineConfiguration.SecurityProfile.UefiSettings.SecureBootEnabled, 
true); Assert.AreEqual(pool.VirtualMachineConfiguration.SecurityProfile.UefiSettings.VTpmEnabled, true); + Assert.AreEqual(pool.VirtualMachineConfiguration.SecurityProfile.ProxyAgentSettings.Enabled, false); + Assert.AreEqual(pool.VirtualMachineConfiguration.SecurityProfile.ProxyAgentSettings.Imds.Mode, HostEndpointSettingsModeTypes.Audit); Assert.AreEqual(pool.VirtualMachineConfiguration.OsDisk.Caching, CachingType.ReadWrite); Assert.AreEqual(pool.VirtualMachineConfiguration.OsDisk.ManagedDisk.SecurityProfile.SecurityEncryptionType, SecurityEncryptionTypes.VMGuestStateOnly); }catch (RequestFailedException e) @@ -311,9 +325,7 @@ public async Task ReplacePool() new BatchMetadataItem("name", "value") }; - BatchCertificateReference[] certificateReferences = new BatchCertificateReference[] { }; - - BatchPoolReplaceOptions replaceContent = new BatchPoolReplaceOptions(certificateReferences, batchApplicationPackageReferences, metadataIems); + BatchPoolReplaceOptions replaceContent = new BatchPoolReplaceOptions(batchApplicationPackageReferences, metadataIems); Response response = await client.ReplacePoolPropertiesAsync(poolID, replaceContent); BatchPool replacePool = await client.GetPoolAsync(poolID); Assert.AreEqual(replacePool.Metadata.First().Value, "value"); @@ -398,10 +410,10 @@ public async Task PatchPool() } ); - updateContent.ResourceTags.Add("tag1", "value1"); - updateContent.ResourceTags.Add("tag2", "value2"); - - updateContent.TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Pack); + updateContent.TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Pack) + { + JobDefaultOrder = BatchJobDefaultOrder.CreationTime, + }; updateContent.UserAccounts.Add(new UserAccount("test1", nodeUserPassword)); updateContent.UserAccounts.Add(new UserAccount("test2", nodeUserPassword) { ElevationLevel = ElevationLevel.NonAdmin }); @@ -416,10 +428,9 @@ public async Task PatchPool() Assert.AreEqual(updateContent.Metadata.Single().Name , 
patchPool.Metadata.Single().Name); Assert.AreEqual(updateContent.Metadata.Single().Value, patchPool.Metadata.Single().Value); Assert.AreEqual(displayName, patchPool.DisplayName); - Assert.AreEqual(2, patchPool.ResourceTags.Count); - Assert.AreEqual("value1", patchPool.ResourceTags["tag1"]); Assert.AreEqual(20, patchPool.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent); Assert.AreEqual(BatchNodeFillType.Pack, patchPool.TaskSchedulingPolicy.NodeFillType); + Assert.AreEqual(BatchJobDefaultOrder.CreationTime, patchPool.TaskSchedulingPolicy.JobDefaultOrder); Assert.AreEqual(4, patchPool.UserAccounts.Count); Assert.AreEqual("standard_d2s_v3", patchPool.VmSize); Assert.AreEqual(1, patchPool.TaskSlotsPerNode); diff --git a/sdk/batch/Azure.Compute.Batch/tests/Samples/MigrationSnippets.cs b/sdk/batch/Azure.Compute.Batch/tests/Samples/MigrationSnippets.cs index 6b0ad5feee26..1980306b9a9e 100644 --- a/sdk/batch/Azure.Compute.Batch/tests/Samples/MigrationSnippets.cs +++ b/sdk/batch/Azure.Compute.Batch/tests/Samples/MigrationSnippets.cs @@ -103,15 +103,7 @@ public void BatchUpdatePool() } }; - BatchCertificateReference[] certificateReferences = new BatchCertificateReference[] { - new BatchCertificateReference("thumbprint","thumbprintAlgorithm") - { - StoreLocation = "storeLocation", - StoreName = "storeName" - } - }; - - BatchPoolReplaceOptions replaceOptions = new BatchPoolReplaceOptions(certificateReferences, batchApplicationPackageReferences, metadataItems); + BatchPoolReplaceOptions replaceOptions = new BatchPoolReplaceOptions(batchApplicationPackageReferences, metadataItems); batchClient.ReplacePoolProperties("poolID", replaceOptions); #endregion } @@ -825,94 +817,5 @@ public void BatchGetApplications() } #endregion } - - public void BatchGetCertificate() - { - #region Snippet:Batch_Migration_GetCertificate - BatchClient batchClient = new BatchClient( - new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - - BatchCertificate 
cerCertificateResponse = batchClient.GetCertificate("ThumbprintAlgorithm", "Thumbprint"); - #endregion - } - - public void BatchCreateCertificate() - { - #region Snippet:Batch_Migration_CreateCerCertificate - BatchClient batchClient = new BatchClient( - new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - byte[] certData = File.ReadAllBytes("certPath"); - BatchCertificate cerCertificate = new BatchCertificate("Thumbprint", "ThumbprintAlgorithm", BinaryData.FromBytes(certData)) - { - CertificateFormat = BatchCertificateFormat.Cer, - Password = "", - }; - - Response response = batchClient.CreateCertificate(cerCertificate); - #endregion - } - - public void BatchCreatePfxCerrtificate() - { - #region Snippet:Batch_Migration_CreatePfxCertificate - BatchClient batchClient = new BatchClient( - new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - - byte[] certData = File.ReadAllBytes("certPath"); - BatchCertificate cerCertificate = new BatchCertificate("Thumbprint", "ThumbprintAlgorithm", BinaryData.FromBytes(certData)) - { - CertificateFormat = BatchCertificateFormat.Pfx, - Password = "password", - }; - - Response response = batchClient.CreateCertificate(cerCertificate); - #endregion - } - - public void BatchListCerrtificate() - { - #region Snippet:Batch_Migration_ListCertificate - BatchClient batchClient = new BatchClient( - new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - - foreach (BatchCertificate item in batchClient.GetCertificates()) - { - // do something - } - #endregion - } - - public void BatchDeleteCerrtificateOperation() - { - #region Snippet:Batch_Migration_DeleteCertificate_Operation - BatchClient batchClient = new BatchClient( - new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - - DeleteCertificateOperation operation = batchClient.DeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); - - // Optional, wait for operation to complete - 
operation.WaitForCompletion(); - #endregion - } - - public void BatchDeleteCerrtificate() - { - #region Snippet:Batch_Migration_DeleteCertificate - BatchClient batchClient = new BatchClient( - new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - - batchClient.DeleteCertificate("ThumbprintAlgorithm", "Thumbprint"); - #endregion - } - - public void BatchCancelDeleteCerrtificate() - { - #region Snippet:Batch_Migration_CancelDeleteCertificate - BatchClient batchClient = new BatchClient( - new Uri("https://.eastus.batch.azure.com"), new DefaultAzureCredential()); - - batchClient.CancelCertificateDeletion("ThumbprintAlgorithm", "Thumbprint"); - #endregion - } } } diff --git a/sdk/batch/Azure.Compute.Batch/tests/UnitTests/LongRunningOperationsUnitTests.cs b/sdk/batch/Azure.Compute.Batch/tests/UnitTests/LongRunningOperationsUnitTests.cs index 4674ad4cbd24..d26d44ba47f9 100644 --- a/sdk/batch/Azure.Compute.Batch/tests/UnitTests/LongRunningOperationsUnitTests.cs +++ b/sdk/batch/Azure.Compute.Batch/tests/UnitTests/LongRunningOperationsUnitTests.cs @@ -29,8 +29,8 @@ public async Task DeallocateNodeOperations_Normal() string poolId = "pool1"; int CallsToGetNode = 0; - BatchNode batchNodeDeallocating = new BatchNode(NodeId, null, BatchNodeState.Deallocating, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); - BatchNode batchNodeDeallocated = new BatchNode(NodeId, null, BatchNodeState.Deallocated, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeDeallocating = new BatchNode(NodeId, null, BatchNodeState.Deallocating, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeDeallocated = new BatchNode(NodeId, null, BatchNodeState.Deallocated, null, null, null, null, null,null, "affinityId", 
"vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetNodeAsync( @@ -119,7 +119,7 @@ public async Task DeallocateNodeOperations_InternalError() string poolId = "pool1"; int CallsToGetNode = 0; - BatchNode batchNodeDeallocated = new BatchNode(NodeId, null, BatchNodeState.Deallocated, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeDeallocated = new BatchNode(NodeId, null, BatchNodeState.Deallocated, null, null, null, null, null, null,"affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetNodeAsync( @@ -177,7 +177,7 @@ public async Task DeallocateNodeOperations_ServiceBusy() string poolId = "pool1"; int CallsToGetNode = 0; - BatchNode batchNodeDeallocated = new BatchNode(NodeId, null, BatchNodeState.Deallocated, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeDeallocated = new BatchNode(NodeId, null, BatchNodeState.Deallocated, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetNodeAsync( @@ -222,54 +222,6 @@ public async Task DeallocateNodeOperations_ServiceBusy() } } - [Test] - /// - /// Verify that normal delete certificate flow succeeds - /// - public async Task DeleteCertificateOperation_Normal() - { - // Arrange - var mockResponse = new MockResponse(200).AddHeader("Retry-After", "0"); - string Thumbprint = "thumbprint"; - string ThumbprintAlgorithm = "thumbprintAlgorithm"; - int CallsToGet = 0; - - BatchCertificate batchCertificate = new BatchCertificate(Thumbprint, ThumbprintAlgorithm, null, null, null, null, null, null, null, null, null, null, null); - - Mock 
clientMock = new Mock(); - clientMock.Setup(c => c.GetCertificateAsync( - It.IsAny(), - It.IsAny(), - It.IsAny(), - It.IsAny(), - It.IsAny>(), - It.IsAny()) - ) - .ReturnsAsync((string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, CancellationToken cancellationToken) => - { - if (CallsToGet++ <= 2) - { - // return a certificate that is deleting - return Response.FromValue(batchCertificate, mockResponse); - } - else - { - throw new RequestFailedException(status: 404, message: "Not Found", errorCode: BatchErrorCode.CertificateNotFound.ToString(), null); - } - } - ); - - BatchClient batchClient = clientMock.Object; - - // Act - DeleteCertificateOperation deleteCertificateOperation = new DeleteCertificateOperation(batchClient, Thumbprint + ";" + ThumbprintAlgorithm); - await deleteCertificateOperation.WaitForCompletionAsync().ConfigureAwait(false); - - // Assert - Assert.IsTrue(deleteCertificateOperation.HasCompleted); - Assert.IsTrue(deleteCertificateOperation.HasValue); - } - [Test] /// /// Verify that normal delete job flow succeeds @@ -720,7 +672,7 @@ public async Task DeletePoolOperation_Normal() string PoolID = "pool"; int CallsToGet = 0; - BatchPool batchPool = new BatchPool(PoolID, null,null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Deleting, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null,null,null, null,null,null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null,null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Deleting, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null,null,null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( @@ -767,8 +719,8 @@ public async Task DeletePoolOperation_NewInstance() string PoolID = "pool"; int CallsToGet = 0; 
- BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Deleting, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); - BatchPool batchPoolNew = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow.AddMinutes(1), BatchPoolState.Active, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Deleting, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPoolNew = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow.AddMinutes(1), BatchPoolState.Active, null, null, null, null,null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( @@ -816,8 +768,8 @@ public async Task DeletePoolOperation_NewState() int CallsToGet = 0; DateTimeOffset creationTime = DateTimeOffset.UtcNow; - BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, creationTime, BatchPoolState.Deleting, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); - BatchPool batchPoolNew = new BatchPool(PoolID, null, null, null, null, creationTime, BatchPoolState.Active, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, creationTime, BatchPoolState.Deleting, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPoolNew = new BatchPool(PoolID, null, null, null, null, creationTime, BatchPoolState.Active, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( @@ -964,8 +916,8 @@ public async Task RebootNodeOperation_Normal() string poolId = "pool1"; int CallsToGetNode = 0; - BatchNode batchNodeRebooting = new BatchNode(NodeId, null, BatchNodeState.Rebooting, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); - BatchNode batchNodeIdle = new BatchNode(NodeId, null, BatchNodeState.Idle, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeRebooting = new BatchNode(NodeId, null, BatchNodeState.Rebooting, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeIdle = new BatchNode(NodeId, null, BatchNodeState.Idle, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetNodeAsync( @@ -1015,8 +967,8 @@ public async Task ReimageNodeOperation_Normal() string poolId = "pool1"; int CallsToGetNode = 0; - BatchNode batchNodeRebooting = new BatchNode(NodeId, null, BatchNodeState.Reimaging, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, 
null, null, null, null); - BatchNode batchNodeIdle = new BatchNode(NodeId, null, BatchNodeState.Idle, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeRebooting = new BatchNode(NodeId, null, BatchNodeState.Reimaging, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeIdle = new BatchNode(NodeId, null, BatchNodeState.Idle, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetNodeAsync( @@ -1065,8 +1017,8 @@ public async Task RemoveNodesOperation_Normal() string PoolID = "pool1"; int CallsToGet = 0; - BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Resizing, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); - BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Resizing, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( @@ -1116,8 +1068,8 @@ public async Task RemoveNodesOperation_PoolDeleted() string PoolID = "pool1"; int CallsToGet = 0; - BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Resizing, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); - BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Resizing, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( @@ -1165,8 +1117,8 @@ public async Task ResizePoolOperation_Normal() string PoolID = "pool1"; int CallsToGet = 0; - BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Resizing, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); - BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Resizing, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( @@ -1216,8 +1168,8 @@ public async Task StartNodeOperation_Normal() string PoolID = "pool1"; int CallsToGet = 0; - BatchNode batchNodeStarting = new BatchNode(NodeId, null, BatchNodeState.Starting, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); - BatchNode batchNodeIdle = new BatchNode(NodeId, null, BatchNodeState.Idle, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeStarting = new BatchNode(NodeId, null, BatchNodeState.Starting, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 0, 0, null, null, null, null, null, null, null, null, null); + BatchNode batchNodeIdle = new BatchNode(NodeId, null, BatchNodeState.Idle, null, null, null, null, null, null, "affinityId", "vmSize", 0, 0, 
0, 0, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetNodeAsync( @@ -1266,8 +1218,8 @@ public async Task StopPoolResizeOperation_Normal() string PoolID = "pool1"; int CallsToGet = 0; - BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Stopping, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); - BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Stopping, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( @@ -1317,8 +1269,8 @@ public async Task StopPoolResizeOperation_PoolDeleted() string PoolID = "pool1"; int CallsToGet = 0; - BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Stopping, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null); - BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPool = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Stopping, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); + BatchPool batchPoolFinished = new BatchPool(PoolID, null, null, null, null, DateTimeOffset.UtcNow, BatchPoolState.Active, null, AllocationState.Steady, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null); Mock clientMock = new Mock(); clientMock.Setup(c => c.GetPoolAsync( diff --git a/sdk/batch/Azure.Compute.Batch/tsp-location.yaml b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml index 75996bbb62d2..826d336a1b8a 100644 --- a/sdk/batch/Azure.Compute.Batch/tsp-location.yaml +++ b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml @@ -1,3 +1,3 @@ directory: specification/batch/Azure.Batch -commit: dfa17e7ea91cf080769d7df32c349a35ccaa49ad +commit: fdf0eec2e8c992498ec053810d611ac6b6e267cb repo: Azure/azure-rest-api-specs