diff --git a/internal/provider/services.go b/internal/provider/services.go
index a2d122871811..728cda9ec5d0 100644
--- a/internal/provider/services.go
+++ b/internal/provider/services.go
@@ -114,6 +114,7 @@ func SupportedTypedServices() []sdk.TypedServiceRegistration {
         mssql.Registration{},
         policy.Registration{},
         resource.Registration{},
+        storage.Registration{},
         streamanalytics.Registration{},
         web.Registration{},
     }
diff --git a/internal/resourceproviders/required.go b/internal/resourceproviders/required.go
index 510987ff96c9..21d1d31224fb 100644
--- a/internal/resourceproviders/required.go
+++ b/internal/resourceproviders/required.go
@@ -71,6 +71,7 @@ func Required() map[string]struct{} {
         "Microsoft.ServiceFabricMesh": {},
         "Microsoft.Sql": {},
         "Microsoft.Storage": {},
+        "Microsoft.StoragePool": {},
         "Microsoft.StreamAnalytics": {},
         "Microsoft.TimeSeriesInsights": {},
         "Microsoft.Web": {},
diff --git a/internal/services/network/subnet_resource.go b/internal/services/network/subnet_resource.go
index 1efdb63ce0b8..e53ff100c63c 100644
--- a/internal/services/network/subnet_resource.go
+++ b/internal/services/network/subnet_resource.go
@@ -134,7 +134,7 @@ func resourceSubnet() *pluginsdk.Resource {
                     "Microsoft.ServiceFabricMesh/networks",
                     "Microsoft.Sql/managedInstances",
                     "Microsoft.Sql/servers",
-                    "Microsoft.StoragePool/diskpools",
+                    "Microsoft.StoragePool/diskPools",
                     "Microsoft.StreamAnalytics/streamingJobs",
                     "Microsoft.Synapse/workspaces",
                     "Microsoft.Web/hostingEnvironments",
@@ -150,6 +150,7 @@ func resourceSubnet() *pluginsdk.Resource {
                     Type: pluginsdk.TypeString,
                     ValidateFunc: validation.StringInSlice([]string{
                         "Microsoft.Network/networkinterfaces/*",
+                        "Microsoft.Network/virtualNetworks/read",
                         "Microsoft.Network/virtualNetworks/subnets/action",
                         "Microsoft.Network/virtualNetworks/subnets/join/action",
                         "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action",
diff --git a/internal/services/storage/client/client.go b/internal/services/storage/client/client.go
index 52d3ecc8f78b..105bcfd711de 100644
--- a/internal/services/storage/client/client.go
+++ b/internal/services/storage/client/client.go
@@ -6,6 +6,7 @@ import (
     legacystorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage"
     "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
+    "github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool"
     "github.com/Azure/azure-sdk-for-go/services/storagesync/mgmt/2020-03-01/storagesync"
     "github.com/Azure/go-autorest/autorest"
     az "github.com/Azure/go-autorest/autorest/azure"
@@ -32,6 +33,7 @@ type Client struct {
     BlobServicesClient *storage.BlobServicesClient
     BlobInventoryPoliciesClient *legacystorage.BlobInventoryPoliciesClient
     CloudEndpointsClient *storagesync.CloudEndpointsClient
+    DisksPoolsClient *storagepool.DiskPoolsClient
     EncryptionScopesClient *storage.EncryptionScopesClient
     Environment az.Environment
     FileServicesClient *storage.FileServicesClient
@@ -69,6 +71,9 @@ func NewClient(options *common.ClientOptions) *Client {
     encryptionScopesClient := storage.NewEncryptionScopesClientWithBaseURI(options.ResourceManagerEndpoint, options.SubscriptionId)
     options.ConfigureClient(&encryptionScopesClient.Client, options.ResourceManagerAuthorizer)
 
+    disksPoolsClient := storagepool.NewDiskPoolsClientWithBaseURI(options.ResourceManagerEndpoint, options.SubscriptionId)
+    options.ConfigureClient(&disksPoolsClient.Client, options.ResourceManagerAuthorizer)
+
     fileServicesClient := storage.NewFileServicesClientWithBaseURI(options.ResourceManagerEndpoint, options.SubscriptionId)
     options.ConfigureClient(&fileServicesClient.Client, options.ResourceManagerAuthorizer)
 
@@ -91,6 +96,7 @@ func NewClient(options *common.ClientOptions) *Client {
         BlobServicesClient: &blobServicesClient,
         BlobInventoryPoliciesClient: &blobInventoryPoliciesClient,
         CloudEndpointsClient: &cloudEndpointsClient,
+        DisksPoolsClient: &disksPoolsClient,
         EncryptionScopesClient: &encryptionScopesClient,
         Environment: options.Environment,
         FileServicesClient: &fileServicesClient,
diff --git a/internal/services/storage/parse/storage_disks_pool.go b/internal/services/storage/parse/storage_disks_pool.go
new file mode 100644
index 000000000000..660596a7b649
--- /dev/null
+++ b/internal/services/storage/parse/storage_disks_pool.go
@@ -0,0 +1,69 @@
+package parse
+
+// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/hashicorp/terraform-provider-azurerm/helpers/azure"
+)
+
+type StorageDisksPoolId struct {
+    SubscriptionId string
+    ResourceGroup string
+    DiskPoolName string
+}
+
+func NewStorageDisksPoolID(subscriptionId, resourceGroup, diskPoolName string) StorageDisksPoolId {
+    return StorageDisksPoolId{
+        SubscriptionId: subscriptionId,
+        ResourceGroup: resourceGroup,
+        DiskPoolName: diskPoolName,
+    }
+}
+
+func (id StorageDisksPoolId) String() string {
+    segments := []string{
+        fmt.Sprintf("Disk Pool Name %q", id.DiskPoolName),
+        fmt.Sprintf("Resource Group %q", id.ResourceGroup),
+    }
+    segmentsStr := strings.Join(segments, " / ")
+    return fmt.Sprintf("%s: (%s)", "Storage Disks Pool", segmentsStr)
+}
+
+func (id StorageDisksPoolId) ID() string {
+    fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.StoragePool/diskPools/%s"
+    return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.DiskPoolName)
+}
+
+// StorageDisksPoolID parses a StorageDisksPool ID into an StorageDisksPoolId struct
+func StorageDisksPoolID(input string) (*StorageDisksPoolId, error) {
+    id, err := azure.ParseAzureResourceID(input)
+    if err != nil {
+        return nil, err
+    }
+
+    resourceId := StorageDisksPoolId{
+        SubscriptionId: id.SubscriptionID,
+        ResourceGroup: id.ResourceGroup,
+    }
+
+    if resourceId.SubscriptionId == "" {
+        return nil, fmt.Errorf("ID was missing the 'subscriptions' element")
+    }
+
+    if resourceId.ResourceGroup == "" {
+        return nil, fmt.Errorf("ID was missing the 'resourceGroups' element")
+    }
+
+    if resourceId.DiskPoolName, err = id.PopSegment("diskPools"); err != nil {
+        return nil, err
+    }
+
+    if err := id.ValidateNoEmptySegments(input); err != nil {
+        return nil, err
+    }
+
+    return &resourceId, nil
+}
diff --git a/internal/services/storage/parse/storage_disks_pool_test.go b/internal/services/storage/parse/storage_disks_pool_test.go
new file mode 100644
index 000000000000..7c3cdea6d74e
--- /dev/null
+++ b/internal/services/storage/parse/storage_disks_pool_test.go
@@ -0,0 +1,112 @@
+package parse
+
+// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten
+
+import (
+    "testing"
+
+    "github.com/hashicorp/terraform-provider-azurerm/internal/resourceid"
+)
+
+var _ resourceid.Formatter = StorageDisksPoolId{}
+
+func TestStorageDisksPoolIDFormatter(t *testing.T) {
+    actual := NewStorageDisksPoolID("12345678-1234-9876-4563-123456789012", "resGroup1", "storagePool1").ID()
+    expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1"
+    if actual != expected {
+        t.Fatalf("Expected %q but got %q", expected, actual)
+    }
+}
+
+func TestStorageDisksPoolID(t *testing.T) {
+    testData := []struct {
+        Input string
+        Error bool
+        Expected *StorageDisksPoolId
+    }{
+
+        {
+            // empty
+            Input: "",
+            Error: true,
+        },
+
+        {
+            // missing SubscriptionId
+            Input: "/",
+            Error: true,
+        },
+
+        {
+            // missing value for SubscriptionId
+            Input: "/subscriptions/",
+            Error: true,
+        },
+
+        {
+            // missing ResourceGroup
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/",
+            Error: true,
+        },
+
+        {
+            // missing value for ResourceGroup
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/",
+            Error: true,
+        },
+
+        {
+            // missing DiskPoolName
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/",
+            Error: true,
+        },
+
+        {
+            // missing value for DiskPoolName
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/",
+            Error: true,
+        },
+
+        {
+            // valid
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1",
+            Expected: &StorageDisksPoolId{
+                SubscriptionId: "12345678-1234-9876-4563-123456789012",
+                ResourceGroup: "resGroup1",
+                DiskPoolName: "storagePool1",
+            },
+        },
+
+        {
+            // upper-cased
+            Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.STORAGEPOOL/DISKPOOLS/STORAGEPOOL1",
+            Error: true,
+        },
+    }
+
+    for _, v := range testData {
+        t.Logf("[DEBUG] Testing %q", v.Input)
+
+        actual, err := StorageDisksPoolID(v.Input)
+        if err != nil {
+            if v.Error {
+                continue
+            }
+
+            t.Fatalf("Expect a value but got an error: %s", err)
+        }
+        if v.Error {
+            t.Fatal("Expect an error but didn't get one")
+        }
+
+        if actual.SubscriptionId != v.Expected.SubscriptionId {
+            t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
+        }
+        if actual.ResourceGroup != v.Expected.ResourceGroup {
+            t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup)
+        }
+        if actual.DiskPoolName != v.Expected.DiskPoolName {
+            t.Fatalf("Expected %q but got %q for DiskPoolName", v.Expected.DiskPoolName, actual.DiskPoolName)
+        }
+    }
+}
diff --git a/internal/services/storage/registration.go b/internal/services/storage/registration.go
index 773e0a7ada60..e8bda4b7061d 100644
--- a/internal/services/storage/registration.go
+++ b/internal/services/storage/registration.go
@@ -1,6 +1,7 @@
 package storage
 
 import (
+    "github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
     "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
 )
 
@@ -60,3 +61,13 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource {
         "azurerm_storage_sync_group": resourceStorageSyncGroup(),
     }
 }
+
+func (r Registration) DataSources() []sdk.DataSource {
+    return []sdk.DataSource{}
+}
+
+func (r Registration) Resources() []sdk.Resource {
+    return []sdk.Resource{
+        DisksPoolResource{},
+    }
+}
diff --git a/internal/services/storage/resourceids.go b/internal/services/storage/resourceids.go
index 0faeca1705b9..d583ec79c7de 100644
--- a/internal/services/storage/resourceids.go
+++ b/internal/services/storage/resourceids.go
@@ -4,6 +4,7 @@ package storage
 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=EncryptionScope -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Storage/storageAccounts/storageAccount1/encryptionScopes/encryptionScope1
 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=StorageAccount -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Storage/storageAccounts/storageAccount1
 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=StorageContainerResourceManager -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Storage/storageAccounts/storageAccount1/blobServices/default/containers/container1
+//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=StorageDisksPool -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1
 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=StorageShareResourceManager -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Storage/storageAccounts/storageAccount1/fileServices/fileService1/fileshares/share1
 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=StorageSyncGroup -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StorageSync/storageSyncServices/storageSyncService1/syncGroups/syncGroup1
 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=StorageSyncService -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StorageSync/storageSyncServices/storageSyncService1
diff --git a/internal/services/storage/storage_disks_pool_resource.go b/internal/services/storage/storage_disks_pool_resource.go
new file mode 100644
index 000000000000..af3f1b546280
--- /dev/null
+++ b/internal/services/storage/storage_disks_pool_resource.go
@@ -0,0 +1,259 @@
+package storage
+
+import (
+    "context"
+    "fmt"
+    "regexp"
+    "strings"
+    "time"
+
+    "github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool"
+    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+    "github.com/hashicorp/terraform-provider-azurerm/helpers/azure"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/location"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/locks"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
+    networkValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/validate"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/tags"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation"
+    "github.com/hashicorp/terraform-provider-azurerm/utils"
+)
+
+type DisksPoolResource struct{}
+
+var _ sdk.ResourceWithUpdate = DisksPoolResource{}
+
+type DisksPoolJobModel struct {
+    Name string `tfschema:"name"`
+    ResourceGroupName string `tfschema:"resource_group_name"`
+    Location string `tfschema:"location"`
+    AvailabilityZones []string `tfschema:"availability_zones"`
+    Sku string `tfschema:"sku_name"`
+    SubnetId string `tfschema:"subnet_id"`
+    Tags map[string]interface{} `tfschema:"tags"`
+}
+
+func (d DisksPoolResource) Arguments() map[string]*schema.Schema {
+    return map[string]*schema.Schema{
+        "name": {
+            Type: pluginsdk.TypeString,
+            Required: true,
+            ForceNew: true,
+            ValidateFunc: validation.All(
+                validation.StringIsNotEmpty,
+                validation.StringLenBetween(7, 30),
+                validation.StringMatch(
+                    regexp.MustCompile(`^[A-Za-z\d][A-Za-z\d.\-_]*[A-Za-z\d_]$`),
+                    "The name must begin with a letter or number, end with a letter, number or underscore, and may contain only letters, numbers, underscores, periods, or hyphens.",
+                ),
+            ),
+        },
+        "resource_group_name": azure.SchemaResourceGroupName(),
+        "location": location.Schema(),
+        "availability_zones": {
+            Type: pluginsdk.TypeList,
+            Required: true,
+            ForceNew: true,
+            MinItems: 1,
+            Elem: &pluginsdk.Schema{
+                Type: pluginsdk.TypeString,
+                ValidateFunc: validation.StringIsNotEmpty,
+            },
+        },
+        "sku_name": {
+            Type: pluginsdk.TypeString,
+            Required: true,
+            ValidateFunc: validation.StringInSlice(
+                []string{
+                    "Basic_B1",
+                    "Standard_S1",
+                    "Premium_P1",
+                }, false,
+            ),
+        },
+        "subnet_id": {
+            Type: pluginsdk.TypeString,
+            Required: true,
+            ForceNew: true,
+            ValidateFunc: networkValidate.SubnetID,
+        },
+        "tags": tags.Schema(),
+    }
+}
+
+func (d DisksPoolResource) Attributes() map[string]*schema.Schema {
+    return map[string]*schema.Schema{}
+}
+
+func (d DisksPoolResource) Create() sdk.ResourceFunc {
+    return sdk.ResourceFunc{
+        Timeout: 30 * time.Minute,
+        Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+            m := DisksPoolJobModel{}
+            err := metadata.Decode(&m)
+            if err != nil {
+                return err
+            }
+            subscriptionId := metadata.Client.Account.SubscriptionId
+            id := parse.NewStorageDisksPoolID(subscriptionId, m.ResourceGroupName, m.Name)
+
+            client := metadata.Client.Storage.DisksPoolsClient
+
+            existing, err := client.Get(ctx, m.ResourceGroupName, m.Name)
+            notExistingResp := utils.ResponseWasNotFound(existing.Response)
+            if err != nil && !notExistingResp {
+                return fmt.Errorf("checking for presence of existing %q: %+v", id, err)
+            }
+            if !notExistingResp {
+                return metadata.ResourceRequiresImport(d.ResourceType(), id)
+            }
+
+            createParameter := storagepool.DiskPoolCreate{
+                DiskPoolCreateProperties: &storagepool.DiskPoolCreateProperties{
+                    AvailabilityZones: &m.AvailabilityZones,
+                    SubnetID: &m.SubnetId,
+                },
+                Location: utils.String(m.Location),
+                Name: utils.String(m.Name),
+                Sku: expandDisksPoolSku(m.Sku),
+                Tags: tags.Expand(m.Tags),
+            }
+            future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, m.Name, createParameter)
+            if err != nil {
+                return fmt.Errorf("creation of %q: %+v", id, err)
+            }
+            if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+                return fmt.Errorf("waiting for creation of %q: %+v", id, err)
+            }
+
+            metadata.SetID(id)
+            return nil
+        },
+    }
+}
+
+func (d DisksPoolResource) Read() sdk.ResourceFunc {
+    return sdk.ResourceFunc{
+        Timeout: 5 * time.Minute,
+        Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+            id, err := parse.StorageDisksPoolID(metadata.ResourceData.Id())
+            if err != nil {
+                return err
+            }
+            client := metadata.Client.Storage.DisksPoolsClient
+            resp, err := client.Get(ctx, id.ResourceGroup, id.DiskPoolName)
+            if err != nil {
+                if utils.ResponseWasNotFound(resp.Response) {
+                    return metadata.MarkAsGone(id)
+                }
+                return fmt.Errorf("retrieving %q: %+v", id, err)
+            }
+            m := DisksPoolJobModel{
+                Name: id.DiskPoolName,
+                ResourceGroupName: id.ResourceGroup,
+                Tags: tags.Flatten(resp.Tags),
+            }
+            if resp.AvailabilityZones != nil {
+                m.AvailabilityZones = *resp.AvailabilityZones
+            }
+            if resp.Location != nil {
+                m.Location = location.Normalize(*resp.Location)
+            }
+            if resp.Sku != nil && resp.Sku.Name != nil {
+                m.Sku = *resp.Sku.Name
+            }
+            if resp.SubnetID != nil {
+                m.SubnetId = *resp.SubnetID
+            }
+            return metadata.Encode(&m)
+        },
+    }
+}
+
+func (d DisksPoolResource) Delete() sdk.ResourceFunc {
+    return sdk.ResourceFunc{
+        Timeout: 30 * time.Minute,
+        Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+            id, err := parse.StorageDisksPoolID(metadata.ResourceData.Id())
+            if err != nil {
+                return err
+            }
+
+            locks.ByID(id.ID())
+            defer locks.UnlockByID(id.ID())
+
+            client := metadata.Client.Storage.DisksPoolsClient
+            future, err := client.Delete(ctx, id.ResourceGroup, id.DiskPoolName)
+            if err != nil {
+                return fmt.Errorf("deletion of %q: %+v", id, err)
+            }
+            if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+                return fmt.Errorf("waiting for deletion of %q: %+v", id, err)
+            }
+            return nil
+        },
+    }
+}
+
+func (d DisksPoolResource) IDValidationFunc() pluginsdk.SchemaValidateFunc {
+    return validate.StorageDisksPoolID
+}
+
+func (d DisksPoolResource) Update() sdk.ResourceFunc {
+    return sdk.ResourceFunc{
+        Timeout: 30 * time.Minute,
+        Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+            r := metadata.ResourceData
+            id, err := parse.StorageDisksPoolID(r.Id())
+            if err != nil {
+                return err
+            }
+
+            locks.ByID(r.Id())
+            defer locks.UnlockByID(r.Id())
+
+            client := metadata.Client.Storage.DisksPoolsClient
+            patch := storagepool.DiskPoolUpdate{}
+            m := DisksPoolJobModel{}
+            err = metadata.Decode(&m)
+            if err != nil {
+                return err
+            }
+
+            if r.HasChange("sku_name") {
+                patch.Sku = expandDisksPoolSku(m.Sku)
+            }
+            if r.HasChange("tags") {
+                patch.Tags = tags.Expand(m.Tags)
+            }
+
+            future, err := client.Update(ctx, id.ResourceGroup, id.DiskPoolName, patch)
+            if err != nil {
+                return fmt.Errorf("update of %q: %+v", id, err)
+            }
+            if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+                return fmt.Errorf("waiting for update of %q: %+v", id, err)
+            }
+            return nil
+        },
+    }
+}
+
+func (d DisksPoolResource) ModelObject() interface{} {
+    return &DisksPoolJobModel{}
+}
+
+func (d DisksPoolResource) ResourceType() string {
+    return "azurerm_storage_disks_pool"
+}
+
+func expandDisksPoolSku(sku string) *storagepool.Sku {
+    parts := strings.Split(sku, "_")
+    return &storagepool.Sku{
+        Name: &sku,
+        Tier: &parts[0],
+    }
+}
diff --git a/internal/services/storage/storage_disks_pool_resource_test.go b/internal/services/storage/storage_disks_pool_resource_test.go
new file mode 100644
index 000000000000..2cf3f6f0b0d7
--- /dev/null
+++ b/internal/services/storage/storage_disks_pool_resource_test.go
@@ -0,0 +1,158 @@
+package storage_test
+
+import (
+    "context"
+    "fmt"
+    "testing"
+
+    "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/clients"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
+    "github.com/hashicorp/terraform-provider-azurerm/utils"
+)
+
+type StorageDisksPoolResource struct{}
+
+func TestAccStorageDisksPool_basic(t *testing.T) {
+    data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool", "test")
+    r := StorageDisksPoolResource{}
+    data.ResourceTest(t, r, []acceptance.TestStep{
+        {
+            Config: r.diskPool(data, "Basic_B1"),
+            Check: acceptance.ComposeTestCheckFunc(
+                check.That(data.ResourceName).ExistsInAzure(r),
+            ),
+        },
+        data.ImportStep(),
+    })
+}
+
+func TestAccStorageDisksPool_standard(t *testing.T) {
+    data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool", "test")
+    r := StorageDisksPoolResource{}
+    data.ResourceTest(t, r, []acceptance.TestStep{
+        {
+            Config: r.diskPool(data, "Standard_S1"),
+            Check: acceptance.ComposeTestCheckFunc(
+                check.That(data.ResourceName).ExistsInAzure(r),
+            ),
+        },
+        data.ImportStep(),
+    })
+}
+
+func TestAccStorageDisksPool_premium(t *testing.T) {
+    data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool", "test")
+    r := StorageDisksPoolResource{}
+    data.ResourceTest(t, r, []acceptance.TestStep{
+        {
+            Config: r.diskPool(data, "Premium_P1"),
+            Check: acceptance.ComposeTestCheckFunc(
+                check.That(data.ResourceName).ExistsInAzure(r),
+            ),
+        },
+        data.ImportStep(),
+    })
+}
+
+func TestAccStorageDisksPool_requiresImport(t *testing.T) {
+    data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool", "test")
+    r := StorageDisksPoolResource{}
+    data.ResourceTest(t, r, []acceptance.TestStep{
+        {
+            Config: r.diskPool(data, "Basic_B1"),
+            Check: acceptance.ComposeTestCheckFunc(
+                check.That(data.ResourceName).ExistsInAzure(r),
+            ),
+        },
+        data.RequiresImportErrorStep(r.requiresImport),
+    })
+}
+
+func (s StorageDisksPoolResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) {
+    id, err := parse.StorageDisksPoolID(state.ID)
+    if err != nil {
+        return nil, err
+    }
+    client := clients.Storage.DisksPoolsClient
+    resp, err := client.Get(ctx, id.ResourceGroup, id.DiskPoolName)
+    if err != nil {
+        if utils.ResponseWasNotFound(resp.Response) {
+            return utils.Bool(false), nil
+        }
+        return nil, fmt.Errorf("retrieving %q: %+v", id, err)
+    }
+    return utils.Bool(true), nil
+}
+
+func (r StorageDisksPoolResource) diskPool(data acceptance.TestData, skuName string) string {
+    return fmt.Sprintf(`
+%s
+
+resource "azurerm_storage_disks_pool" "test" {
+  name                = "acctest-diskspool-%s"
+  resource_group_name = azurerm_resource_group.test.name
+  location            = azurerm_resource_group.test.location
+  availability_zones  = ["1"]
+  sku_name            = "%s"
+  subnet_id           = azurerm_subnet.test.id
+  tags = {
+    foo = "bar"
+  }
+}
+`, r.template(data), data.RandomString, skuName)
+}
+
+func (r StorageDisksPoolResource) template(data acceptance.TestData) string {
+    return fmt.Sprintf(`
+provider "azurerm" {
+  features {}
+}
+
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-diskspool-%[2]d"
+  location = "%[1]s"
+}
+
+resource "azurerm_virtual_network" "test" {
+  name                = "acctest-vnet-%[2]d"
+  resource_group_name = azurerm_resource_group.test.name
+  location            = azurerm_resource_group.test.location
+  address_space       = ["10.0.0.0/16"]
+}
+
+resource "azurerm_subnet" "test" {
+  name                 = "acctest-subnet-%[2]d"
+  resource_group_name  = azurerm_resource_group.test.name
+  virtual_network_name = azurerm_virtual_network.test.name
+  address_prefixes     = ["10.0.0.0/24"]
+  delegation {
+    name = "diskspool"
+    service_delegation {
+      actions = ["Microsoft.Network/virtualNetworks/read"]
+      name    = "Microsoft.StoragePool/diskPools"
+    }
+  }
+}
+`, data.Locations.Primary, data.RandomInteger)
+}
+
+func (r StorageDisksPoolResource) requiresImport(data acceptance.TestData) string {
+    return fmt.Sprintf(`
+%s
+
+resource "azurerm_storage_disks_pool" "import" {
+  name                = "acctest-diskspool-%s"
+  resource_group_name = azurerm_resource_group.test.name
+  location            = azurerm_resource_group.test.location
+  availability_zones  = ["1"]
+  sku_name            = "Basic_B1"
+  subnet_id           = azurerm_subnet.test.id
+  tags = {
+    foo = "bar"
+  }
+}
+`, r.diskPool(data, "Basic_B1"), data.RandomString)
+}
diff --git a/internal/services/storage/validate/storage_disks_pool_id.go b/internal/services/storage/validate/storage_disks_pool_id.go
new file mode 100644
index 000000000000..2c5583ae8826
--- /dev/null
+++ b/internal/services/storage/validate/storage_disks_pool_id.go
@@ -0,0 +1,23 @@
+package validate
+
+// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse"
+)
+
+func StorageDisksPoolID(input interface{}, key string) (warnings []string, errors []error) {
+    v, ok := input.(string)
+    if !ok {
+        errors = append(errors, fmt.Errorf("expected %q to be a string", key))
+        return
+    }
+
+    if _, err := parse.StorageDisksPoolID(v); err != nil {
+        errors = append(errors, err)
+    }
+
+    return
+}
diff --git a/internal/services/storage/validate/storage_disks_pool_id_test.go b/internal/services/storage/validate/storage_disks_pool_id_test.go
new file mode 100644
index 000000000000..103c0d6b3870
--- /dev/null
+++ b/internal/services/storage/validate/storage_disks_pool_id_test.go
@@ -0,0 +1,76 @@
+package validate
+
+// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten
+
+import "testing"
+
+func TestStorageDisksPoolID(t *testing.T) {
+    cases := []struct {
+        Input string
+        Valid bool
+    }{
+
+        {
+            // empty
+            Input: "",
+            Valid: false,
+        },
+
+        {
+            // missing SubscriptionId
+            Input: "/",
+            Valid: false,
+        },
+
+        {
+            // missing value for SubscriptionId
+            Input: "/subscriptions/",
+            Valid: false,
+        },
+
+        {
+            // missing ResourceGroup
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/",
+            Valid: false,
+        },
+
+        {
+            // missing value for ResourceGroup
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/",
+            Valid: false,
+        },
+
+        {
+            // missing DiskPoolName
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/",
+            Valid: false,
+        },
+
+        {
+            // missing value for DiskPoolName
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/",
+            Valid: false,
+        },
+
+        {
+            // valid
+            Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1",
+            Valid: true,
+        },
+
+        {
+            // upper-cased
+            Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.STORAGEPOOL/DISKPOOLS/STORAGEPOOL1",
+            Valid: false,
+        },
+    }
+    for _, tc := range cases {
+        t.Logf("[DEBUG] Testing Value %s", tc.Input)
+        _, errors := StorageDisksPoolID(tc.Input, "test")
+        valid := len(errors) == 0
+
+        if tc.Valid != valid {
+            t.Fatalf("Expected %t but got %t", tc.Valid, valid)
+        }
+    }
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/CHANGELOG.md
new file mode 100644
index 000000000000..52911e4cc5e4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/CHANGELOG.md
@@ -0,0 +1,2 @@
+# Change History
+
diff
--git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/_meta.json new file mode 100644 index 000000000000..edec67d92670 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/_meta.json @@ -0,0 +1,11 @@ +{ + "commit": "af463c3f9502d353b8a009685177f13335adb8cd", + "readme": "/_/azure-rest-api-specs/specification/storagepool/resource-manager/readme.md", + "tag": "package-2021-08-01", + "use": "@microsoft.azure/autorest.go@2.1.187", + "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-08-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/storagepool/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/client.go new file mode 100644 index 000000000000..72832e2ca49b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/client.go @@ -0,0 +1,41 @@ +// Package storagepool implements the Azure ARM Storagepool service API version 2021-08-01. +// +// +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Storagepool + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Storagepool. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/diskpools.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/diskpools.go new file mode 100644 index 000000000000..e92113897e67 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/diskpools.go @@ -0,0 +1,1041 @@ +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DiskPoolsClient is the client for the DiskPools methods of the Storagepool service. +type DiskPoolsClient struct { + BaseClient +} + +// NewDiskPoolsClient creates an instance of the DiskPoolsClient client. +func NewDiskPoolsClient(subscriptionID string) DiskPoolsClient { + return NewDiskPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDiskPoolsClientWithBaseURI creates an instance of the DiskPoolsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewDiskPoolsClientWithBaseURI(baseURI string, subscriptionID string) DiskPoolsClient { + return DiskPoolsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or Update Disk pool. This create or update operation can take 15 minutes to complete. This is +// expected service behavior. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +// diskPoolCreatePayload - request payload for Disk Pool create operation +func (client DiskPoolsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskPoolName string, diskPoolCreatePayload DiskPoolCreate) (result DiskPoolsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}, + {TargetValue: diskPoolCreatePayload, + Constraints: []validation.Constraint{{Target: "diskPoolCreatePayload.Sku", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "diskPoolCreatePayload.Sku.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "diskPoolCreatePayload.DiskPoolCreateProperties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "diskPoolCreatePayload.DiskPoolCreateProperties.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "diskPoolCreatePayload.Location", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskPoolName, diskPoolCreatePayload) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", 
"CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client DiskPoolsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskPoolName string, diskPoolCreatePayload DiskPoolCreate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + diskPoolCreatePayload.ID = nil + diskPoolCreatePayload.Name = nil + diskPoolCreatePayload.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}", pathParameters), + autorest.WithJSON(diskPoolCreatePayload), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolsClient) CreateOrUpdateSender(req *http.Request) (future DiskPoolsCreateOrUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DiskPoolsClient) CreateOrUpdateResponder(resp *http.Response) (result DiskPool, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Deallocate shuts down the Disk Pool and releases the compute resources. You are not billed for the compute resources +// that this Disk Pool uses. This operation can take 10 minutes to complete. This is expected service behavior. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. 
+func (client DiskPoolsClient) Deallocate(ctx context.Context, resourceGroupName string, diskPoolName string) (result DiskPoolsDeallocateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.Deallocate") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "Deallocate", err.Error()) + } + + req, err := client.DeallocatePreparer(ctx, resourceGroupName, diskPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Deallocate", nil, "Failure preparing request") + return + } + + result, err = client.DeallocateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Deallocate", result.Response(), "Failure sending request") + return + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client DiskPoolsClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, diskPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/deallocate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolsClient) DeallocateSender(req *http.Request) (future DiskPoolsDeallocateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client DiskPoolsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete delete a Disk pool; attached disks are not affected. 
This delete operation can take 10 minutes to complete. +// This is expected service behavior. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +func (client DiskPoolsClient) Delete(ctx context.Context, resourceGroupName string, diskPoolName string) (result DiskPoolsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.Delete") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, diskPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client DiskPoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolsClient) DeleteSender(req *http.Request) (future DiskPoolsDeleteFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client DiskPoolsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get a Disk pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +func (client DiskPoolsClient) Get(ctx context.Context, resourceGroupName string, diskPoolName string) (result DiskPool, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, diskPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client DiskPoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, diskPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client DiskPoolsClient) GetResponder(resp *http.Response) (result DiskPool, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup gets a list of DiskPools in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +func (client DiskPoolsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskPoolListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.dplr.Response.Response != nil { + sc = result.dplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.dplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.dplr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListByResourceGroup", resp, "Failure responding to request") + return + } + if result.dplr.hasNextLink() && result.dplr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DiskPoolsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client DiskPoolsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client DiskPoolsClient) ListByResourceGroupResponder(resp *http.Response) (result DiskPoolListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client DiskPoolsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DiskPoolListResult) (result DiskPoolListResult, err error) { + req, err := lastResults.diskPoolListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client DiskPoolsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskPoolListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// ListBySubscription gets a list of Disk Pools in a subscription +func (client DiskPoolsClient) ListBySubscription(ctx context.Context) (result DiskPoolListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.ListBySubscription") + defer func() { + sc := -1 + if result.dplr.Response.Response != nil { + sc = result.dplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "ListBySubscription", err.Error()) + } + + result.fn = client.listBySubscriptionNextResults + req, err := client.ListBySubscriptionPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.dplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListBySubscription", resp, "Failure sending request") + return + } + + result.dplr, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListBySubscription", resp, "Failure responding to request") + return + } + if result.dplr.hasNextLink() && result.dplr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client DiskPoolsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.StoragePool/diskPools", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. 
+func (client DiskPoolsClient) ListBySubscriptionResponder(resp *http.Response) (result DiskPoolListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listBySubscriptionNextResults retrieves the next set of results, if any. +func (client DiskPoolsClient) listBySubscriptionNextResults(ctx context.Context, lastResults DiskPoolListResult) (result DiskPoolListResult, err error) { + req, err := lastResults.diskPoolListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request") + } + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. +func (client DiskPoolsClient) ListBySubscriptionComplete(ctx context.Context) (result DiskPoolListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.ListBySubscription") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListBySubscription(ctx) + return +} + +// ListOutboundNetworkDependenciesEndpoints gets the network endpoints of all outbound dependencies of a Disk Pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. 
+func (client DiskPoolsClient) ListOutboundNetworkDependenciesEndpoints(ctx context.Context, resourceGroupName string, diskPoolName string) (result OutboundEnvironmentEndpointListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.ListOutboundNetworkDependenciesEndpoints") + defer func() { + sc := -1 + if result.oeel.Response.Response != nil { + sc = result.oeel.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "ListOutboundNetworkDependenciesEndpoints", err.Error()) + } + + result.fn = client.listOutboundNetworkDependenciesEndpointsNextResults + req, err := client.ListOutboundNetworkDependenciesEndpointsPreparer(ctx, resourceGroupName, diskPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListOutboundNetworkDependenciesEndpoints", nil, "Failure preparing request") + return + } + + resp, err := client.ListOutboundNetworkDependenciesEndpointsSender(req) + if err != nil { + result.oeel.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListOutboundNetworkDependenciesEndpoints", resp, "Failure sending request") + return + } + + result.oeel, err = client.ListOutboundNetworkDependenciesEndpointsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "ListOutboundNetworkDependenciesEndpoints", resp, "Failure responding to request") + return + } + if result.oeel.hasNextLink() && result.oeel.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListOutboundNetworkDependenciesEndpointsPreparer prepares the ListOutboundNetworkDependenciesEndpoints request. +func (client DiskPoolsClient) ListOutboundNetworkDependenciesEndpointsPreparer(ctx context.Context, resourceGroupName string, diskPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/outboundNetworkDependenciesEndpoints", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListOutboundNetworkDependenciesEndpointsSender sends the ListOutboundNetworkDependenciesEndpoints request. The method will close the +// http.Response Body if it receives an error. 
+func (client DiskPoolsClient) ListOutboundNetworkDependenciesEndpointsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListOutboundNetworkDependenciesEndpointsResponder handles the response to the ListOutboundNetworkDependenciesEndpoints request. The method always +// closes the http.Response Body. +func (client DiskPoolsClient) ListOutboundNetworkDependenciesEndpointsResponder(resp *http.Response) (result OutboundEnvironmentEndpointList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listOutboundNetworkDependenciesEndpointsNextResults retrieves the next set of results, if any. +func (client DiskPoolsClient) listOutboundNetworkDependenciesEndpointsNextResults(ctx context.Context, lastResults OutboundEnvironmentEndpointList) (result OutboundEnvironmentEndpointList, err error) { + req, err := lastResults.outboundEnvironmentEndpointListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listOutboundNetworkDependenciesEndpointsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListOutboundNetworkDependenciesEndpointsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listOutboundNetworkDependenciesEndpointsNextResults", resp, "Failure sending next results request") + } + result, err = client.ListOutboundNetworkDependenciesEndpointsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "listOutboundNetworkDependenciesEndpointsNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListOutboundNetworkDependenciesEndpointsComplete enumerates all values, automatically crossing page boundaries as required. +func (client DiskPoolsClient) ListOutboundNetworkDependenciesEndpointsComplete(ctx context.Context, resourceGroupName string, diskPoolName string) (result OutboundEnvironmentEndpointListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.ListOutboundNetworkDependenciesEndpoints") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListOutboundNetworkDependenciesEndpoints(ctx, resourceGroupName, diskPoolName) + return +} + +// Start the operation to start a Disk Pool. This start operation can take 10 minutes to complete. This is expected +// service behavior. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. 
+func (client DiskPoolsClient) Start(ctx context.Context, resourceGroupName string, diskPoolName string) (result DiskPoolsStartFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.Start") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "Start", err.Error()) + } + + req, err := client.StartPreparer(ctx, resourceGroupName, diskPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Start", nil, "Failure preparing request") + return + } + + result, err = client.StartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Start", result.Response(), "Failure sending request") + return + } + + return +} + +// StartPreparer prepares the Start request. +func (client DiskPoolsClient) StartPreparer(ctx context.Context, resourceGroupName string, diskPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolsClient) StartSender(req *http.Request) (future DiskPoolsStartFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client DiskPoolsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update update a Disk pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// diskPoolName - the name of the Disk Pool. +// diskPoolUpdatePayload - request payload for Disk Pool update operation. +func (client DiskPoolsClient) Update(ctx context.Context, resourceGroupName string, diskPoolName string, diskPoolUpdatePayload DiskPoolUpdate) (result DiskPoolsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.Update") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, diskPoolName, diskPoolUpdatePayload) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client DiskPoolsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskPoolName string, diskPoolUpdatePayload DiskPoolUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}", pathParameters), + autorest.WithJSON(diskPoolUpdatePayload), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolsClient) UpdateSender(req *http.Request) (future DiskPoolsUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client DiskPoolsClient) UpdateResponder(resp *http.Response) (result DiskPool, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Upgrade upgrade replaces the underlying virtual machine hosts one at a time. This operation can take 10-15 minutes +// to complete. This is expected service behavior. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +func (client DiskPoolsClient) Upgrade(ctx context.Context, resourceGroupName string, diskPoolName string) (result DiskPoolsUpgradeFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolsClient.Upgrade") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolsClient", "Upgrade", err.Error()) + } + + req, err := client.UpgradePreparer(ctx, resourceGroupName, diskPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Upgrade", nil, "Failure preparing request") + return + } + + result, err = client.UpgradeSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsClient", "Upgrade", result.Response(), "Failure sending request") + return + } + + return +} + +// UpgradePreparer prepares the Upgrade request. +func (client DiskPoolsClient) UpgradePreparer(ctx context.Context, resourceGroupName string, diskPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/upgrade", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpgradeSender sends the Upgrade request. The method will close the +// http.Response Body if it receives an error. 
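The Start, Update, and Upgrade methods above all return futures for long-running operations. A minimal sketch of the caller-side polling pattern, assuming placeholder resource names and environment-based credentials (not part of this change):

package main

import (
	"context"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical identifiers used only for illustration.
	subscriptionID := "00000000-0000-0000-0000-000000000000"
	resourceGroup := "example-resources"
	diskPoolName := "example-diskpool"

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatalf("building authorizer: %v", err)
	}

	client := storagepool.NewDiskPoolsClient(subscriptionID)
	client.Authorizer = authorizer

	// Start can take on the order of 10 minutes, so give the poller generous headroom.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()

	future, err := client.Start(ctx, resourceGroup, diskPoolName)
	if err != nil {
		log.Fatalf("starting disk pool: %v", err)
	}
	// Block until the long-running operation reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatalf("waiting for disk pool start: %v", err)
	}
	log.Printf("disk pool %q started", diskPoolName)
}

The same WaitForCompletionRef pattern applies to the Update and Upgrade futures.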
+func (client DiskPoolsClient) UpgradeSender(req *http.Request) (future DiskPoolsUpgradeFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// UpgradeResponder handles the response to the Upgrade request. The method always +// closes the http.Response Body. +func (client DiskPoolsClient) UpgradeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/diskpoolzones.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/diskpoolzones.go new file mode 100644 index 000000000000..eb0d6d0c85cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/diskpoolzones.go @@ -0,0 +1,154 @@ +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DiskPoolZonesClient is the client for the DiskPoolZones methods of the Storagepool service. +type DiskPoolZonesClient struct { + BaseClient +} + +// NewDiskPoolZonesClient creates an instance of the DiskPoolZonesClient client. +func NewDiskPoolZonesClient(subscriptionID string) DiskPoolZonesClient { + return NewDiskPoolZonesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDiskPoolZonesClientWithBaseURI creates an instance of the DiskPoolZonesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewDiskPoolZonesClientWithBaseURI(baseURI string, subscriptionID string) DiskPoolZonesClient { + return DiskPoolZonesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists available Disk Pool Skus in an Azure location. +// Parameters: +// location - the location of the resource. 
+func (client DiskPoolZonesClient) List(ctx context.Context, location string) (result DiskPoolZoneListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolZonesClient.List") + defer func() { + sc := -1 + if result.dpzlr.Response.Response != nil { + sc = result.dpzlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.DiskPoolZonesClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolZonesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.dpzlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolZonesClient", "List", resp, "Failure sending request") + return + } + + result.dpzlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolZonesClient", "List", resp, "Failure responding to request") + return + } + if result.dpzlr.hasNextLink() && result.dpzlr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client DiskPoolZonesClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.StoragePool/locations/{location}/diskPoolZones", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DiskPoolZonesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DiskPoolZonesClient) ListResponder(resp *http.Response) (result DiskPoolZoneListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client DiskPoolZonesClient) listNextResults(ctx context.Context, lastResults DiskPoolZoneListResult) (result DiskPoolZoneListResult, err error) { + req, err := lastResults.diskPoolZoneListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolZonesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storagepool.DiskPoolZonesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolZonesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client DiskPoolZonesClient) ListComplete(ctx context.Context, location string) (result DiskPoolZoneListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolZonesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, location) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/enums.go new file mode 100644 index 000000000000..9c5ebc84397d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/enums.go @@ -0,0 +1,142 @@ +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// CreatedByType enumerates the values for created by type. +type CreatedByType string + +const ( + // CreatedByTypeApplication ... + CreatedByTypeApplication CreatedByType = "Application" + // CreatedByTypeKey ... + CreatedByTypeKey CreatedByType = "Key" + // CreatedByTypeManagedIdentity ... + CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity" + // CreatedByTypeUser ... + CreatedByTypeUser CreatedByType = "User" +) + +// PossibleCreatedByTypeValues returns an array of possible values for the CreatedByType const type. +func PossibleCreatedByTypeValues() []CreatedByType { + return []CreatedByType{CreatedByTypeApplication, CreatedByTypeKey, CreatedByTypeManagedIdentity, CreatedByTypeUser} +} + +// DiskPoolTier enumerates the values for disk pool tier. +type DiskPoolTier string + +const ( + // DiskPoolTierBasic ... + DiskPoolTierBasic DiskPoolTier = "Basic" + // DiskPoolTierPremium ... + DiskPoolTierPremium DiskPoolTier = "Premium" + // DiskPoolTierStandard ... + DiskPoolTierStandard DiskPoolTier = "Standard" +) + +// PossibleDiskPoolTierValues returns an array of possible values for the DiskPoolTier const type. 
+func PossibleDiskPoolTierValues() []DiskPoolTier { + return []DiskPoolTier{DiskPoolTierBasic, DiskPoolTierPremium, DiskPoolTierStandard} +} + +// IscsiTargetACLMode enumerates the values for iscsi target acl mode. +type IscsiTargetACLMode string + +const ( + // IscsiTargetACLModeDynamic ... + IscsiTargetACLModeDynamic IscsiTargetACLMode = "Dynamic" + // IscsiTargetACLModeStatic ... + IscsiTargetACLModeStatic IscsiTargetACLMode = "Static" +) + +// PossibleIscsiTargetACLModeValues returns an array of possible values for the IscsiTargetACLMode const type. +func PossibleIscsiTargetACLModeValues() []IscsiTargetACLMode { + return []IscsiTargetACLMode{IscsiTargetACLModeDynamic, IscsiTargetACLModeStatic} +} + +// OperationalStatus enumerates the values for operational status. +type OperationalStatus string + +const ( + // OperationalStatusHealthy ... + OperationalStatusHealthy OperationalStatus = "Healthy" + // OperationalStatusInvalid ... + OperationalStatusInvalid OperationalStatus = "Invalid" + // OperationalStatusRunning ... + OperationalStatusRunning OperationalStatus = "Running" + // OperationalStatusStopped ... + OperationalStatusStopped OperationalStatus = "Stopped" + // OperationalStatusStoppeddeallocated ... + OperationalStatusStoppeddeallocated OperationalStatus = "Stopped (deallocated)" + // OperationalStatusUnhealthy ... + OperationalStatusUnhealthy OperationalStatus = "Unhealthy" + // OperationalStatusUnknown ... + OperationalStatusUnknown OperationalStatus = "Unknown" + // OperationalStatusUpdating ... + OperationalStatusUpdating OperationalStatus = "Updating" +) + +// PossibleOperationalStatusValues returns an array of possible values for the OperationalStatus const type. +func PossibleOperationalStatusValues() []OperationalStatus { + return []OperationalStatus{OperationalStatusHealthy, OperationalStatusInvalid, OperationalStatusRunning, OperationalStatusStopped, OperationalStatusStoppeddeallocated, OperationalStatusUnhealthy, OperationalStatusUnknown, OperationalStatusUpdating} +} + +// ProvisioningStates enumerates the values for provisioning states. +type ProvisioningStates string + +const ( + // ProvisioningStatesCanceled ... + ProvisioningStatesCanceled ProvisioningStates = "Canceled" + // ProvisioningStatesCreating ... + ProvisioningStatesCreating ProvisioningStates = "Creating" + // ProvisioningStatesDeleting ... + ProvisioningStatesDeleting ProvisioningStates = "Deleting" + // ProvisioningStatesFailed ... + ProvisioningStatesFailed ProvisioningStates = "Failed" + // ProvisioningStatesInvalid ... + ProvisioningStatesInvalid ProvisioningStates = "Invalid" + // ProvisioningStatesPending ... + ProvisioningStatesPending ProvisioningStates = "Pending" + // ProvisioningStatesSucceeded ... + ProvisioningStatesSucceeded ProvisioningStates = "Succeeded" + // ProvisioningStatesUpdating ... + ProvisioningStatesUpdating ProvisioningStates = "Updating" +) + +// PossibleProvisioningStatesValues returns an array of possible values for the ProvisioningStates const type. +func PossibleProvisioningStatesValues() []ProvisioningStates { + return []ProvisioningStates{ProvisioningStatesCanceled, ProvisioningStatesCreating, ProvisioningStatesDeleting, ProvisioningStatesFailed, ProvisioningStatesInvalid, ProvisioningStatesPending, ProvisioningStatesSucceeded, ProvisioningStatesUpdating} +} + +// ResourceSkuRestrictionsReasonCode enumerates the values for resource sku restrictions reason code. 
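As an aside, the Possible*Values helpers above are the natural source for provider-side schema validation lists. A small sketch of flattening the typed enum into plain strings; the helper name here is hypothetical:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool"
)

// possibleDiskPoolTierStrings is a hypothetical helper that converts the SDK's
// typed DiskPoolTier values into plain strings, e.g. for a StringInSlice-style validator.
func possibleDiskPoolTierStrings() []string {
	tiers := storagepool.PossibleDiskPoolTierValues()
	out := make([]string, 0, len(tiers))
	for _, tier := range tiers {
		out = append(out, string(tier))
	}
	return out
}

func main() {
	// Prints: [Basic Premium Standard]
	fmt.Println(possibleDiskPoolTierStrings())
}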
+type ResourceSkuRestrictionsReasonCode string + +const ( + // ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription ... + ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + // ResourceSkuRestrictionsReasonCodeQuotaID ... + ResourceSkuRestrictionsReasonCodeQuotaID ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +// PossibleResourceSkuRestrictionsReasonCodeValues returns an array of possible values for the ResourceSkuRestrictionsReasonCode const type. +func PossibleResourceSkuRestrictionsReasonCodeValues() []ResourceSkuRestrictionsReasonCode { + return []ResourceSkuRestrictionsReasonCode{ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription, ResourceSkuRestrictionsReasonCodeQuotaID} +} + +// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type. +type ResourceSkuRestrictionsType string + +const ( + // ResourceSkuRestrictionsTypeLocation ... + ResourceSkuRestrictionsTypeLocation ResourceSkuRestrictionsType = "Location" + // ResourceSkuRestrictionsTypeZone ... + ResourceSkuRestrictionsTypeZone ResourceSkuRestrictionsType = "Zone" +) + +// PossibleResourceSkuRestrictionsTypeValues returns an array of possible values for the ResourceSkuRestrictionsType const type. +func PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType { + return []ResourceSkuRestrictionsType{ResourceSkuRestrictionsTypeLocation, ResourceSkuRestrictionsTypeZone} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/iscsitargets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/iscsitargets.go new file mode 100644 index 000000000000..cbd901a906d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/iscsitargets.go @@ -0,0 +1,528 @@ +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// IscsiTargetsClient is the client for the IscsiTargets methods of the Storagepool service. +type IscsiTargetsClient struct { + BaseClient +} + +// NewIscsiTargetsClient creates an instance of the IscsiTargetsClient client. +func NewIscsiTargetsClient(subscriptionID string) IscsiTargetsClient { + return NewIscsiTargetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIscsiTargetsClientWithBaseURI creates an instance of the IscsiTargetsClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewIscsiTargetsClientWithBaseURI(baseURI string, subscriptionID string) IscsiTargetsClient { + return IscsiTargetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or Update an iSCSI Target. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +// iscsiTargetName - the name of the iSCSI Target. 
+// iscsiTargetCreatePayload - request payload for iSCSI Target create operation. +func (client IscsiTargetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, iscsiTargetCreatePayload IscsiTargetCreate) (result IscsiTargetsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}, + {TargetValue: iscsiTargetCreatePayload, + Constraints: []validation.Constraint{{Target: "iscsiTargetCreatePayload.IscsiTargetCreateProperties", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.IscsiTargetsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskPoolName, iscsiTargetName, iscsiTargetCreatePayload) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client IscsiTargetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, iscsiTargetCreatePayload IscsiTargetCreate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "iscsiTargetName": autorest.Encode("path", iscsiTargetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}", pathParameters), + autorest.WithJSON(iscsiTargetCreatePayload), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client IscsiTargetsClient) CreateOrUpdateSender(req *http.Request) (future IscsiTargetsCreateOrUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client IscsiTargetsClient) CreateOrUpdateResponder(resp *http.Response) (result IscsiTarget, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete an iSCSI Target. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +// iscsiTargetName - the name of the iSCSI Target. +func (client IscsiTargetsClient) Delete(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string) (result IscsiTargetsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetsClient.Delete") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.IscsiTargetsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, diskPoolName, iscsiTargetName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client IscsiTargetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "iscsiTargetName": autorest.Encode("path", iscsiTargetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client IscsiTargetsClient) DeleteSender(req *http.Request) (future IscsiTargetsDeleteFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IscsiTargetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get an iSCSI Target. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +// iscsiTargetName - the name of the iSCSI Target. 
+func (client IscsiTargetsClient) Get(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string) (result IscsiTarget, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.IscsiTargetsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, diskPoolName, iscsiTargetName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client IscsiTargetsClient) GetPreparer(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "iscsiTargetName": autorest.Encode("path", iscsiTargetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client IscsiTargetsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
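Since GetResponder (below) only accepts HTTP 200, a missing iSCSI Target surfaces from Get as an error rather than an empty result. A hedged sketch of how a caller might distinguish "not found" from other failures, using placeholder names and an environment-based authorizer (both assumptions, not part of this change):

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical identifiers used only for illustration.
	subscriptionID := "00000000-0000-0000-0000-000000000000"
	resourceGroup := "example-resources"
	diskPoolName := "example-diskpool"
	targetName := "example-target"

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatalf("building authorizer: %v", err)
	}

	client := storagepool.NewIscsiTargetsClient(subscriptionID)
	client.Authorizer = authorizer

	resp, err := client.Get(context.Background(), resourceGroup, diskPoolName, targetName)
	if err != nil {
		// Inspect the wrapped *http.Response to tell a 404 apart from other failures.
		if resp.Response.Response != nil && resp.Response.Response.StatusCode == http.StatusNotFound {
			log.Printf("iSCSI target %q was not found", targetName)
			return
		}
		log.Fatalf("retrieving iSCSI target: %v", err)
	}
	if resp.Name != nil {
		log.Printf("found iSCSI target %q", *resp.Name)
	}
}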
+func (client IscsiTargetsClient) GetResponder(resp *http.Response) (result IscsiTarget, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByDiskPool get iSCSI Targets in a Disk pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +func (client IscsiTargetsClient) ListByDiskPool(ctx context.Context, resourceGroupName string, diskPoolName string) (result IscsiTargetListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetsClient.ListByDiskPool") + defer func() { + sc := -1 + if result.itl.Response.Response != nil { + sc = result.itl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.IscsiTargetsClient", "ListByDiskPool", err.Error()) + } + + result.fn = client.listByDiskPoolNextResults + req, err := client.ListByDiskPoolPreparer(ctx, resourceGroupName, diskPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "ListByDiskPool", nil, "Failure preparing request") + return + } + + resp, err := client.ListByDiskPoolSender(req) + if err != nil { + result.itl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "ListByDiskPool", resp, "Failure sending request") + return + } + + result.itl, err = client.ListByDiskPoolResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "ListByDiskPool", resp, "Failure responding to request") + return + } + if result.itl.hasNextLink() && result.itl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListByDiskPoolPreparer prepares the ListByDiskPool request. +func (client IscsiTargetsClient) ListByDiskPoolPreparer(ctx context.Context, resourceGroupName string, diskPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByDiskPoolSender sends the ListByDiskPool request. 
The method will close the +// http.Response Body if it receives an error. +func (client IscsiTargetsClient) ListByDiskPoolSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByDiskPoolResponder handles the response to the ListByDiskPool request. The method always +// closes the http.Response Body. +func (client IscsiTargetsClient) ListByDiskPoolResponder(resp *http.Response) (result IscsiTargetList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByDiskPoolNextResults retrieves the next set of results, if any. +func (client IscsiTargetsClient) listByDiskPoolNextResults(ctx context.Context, lastResults IscsiTargetList) (result IscsiTargetList, err error) { + req, err := lastResults.iscsiTargetListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "listByDiskPoolNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByDiskPoolSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "listByDiskPoolNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByDiskPoolResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "listByDiskPoolNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByDiskPoolComplete enumerates all values, automatically crossing page boundaries as required. +func (client IscsiTargetsClient) ListByDiskPoolComplete(ctx context.Context, resourceGroupName string, diskPoolName string) (result IscsiTargetListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetsClient.ListByDiskPool") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByDiskPool(ctx, resourceGroupName, diskPoolName) + return +} + +// Update update an iSCSI Target. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// diskPoolName - the name of the Disk Pool. +// iscsiTargetName - the name of the iSCSI Target. +// iscsiTargetUpdatePayload - request payload for iSCSI Target update operation. 
+func (client IscsiTargetsClient) Update(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, iscsiTargetUpdatePayload IscsiTargetUpdate) (result IscsiTargetsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetsClient.Update") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._]*[0-9A-Za-z]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.IscsiTargetsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, diskPoolName, iscsiTargetName, iscsiTargetUpdatePayload) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client IscsiTargetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, iscsiTargetUpdatePayload IscsiTargetUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskPoolName": autorest.Encode("path", diskPoolName), + "iscsiTargetName": autorest.Encode("path", iscsiTargetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}", pathParameters), + autorest.WithJSON(iscsiTargetUpdatePayload), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client IscsiTargetsClient) UpdateSender(req *http.Request) (future IscsiTargetsUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client IscsiTargetsClient) UpdateResponder(resp *http.Response) (result IscsiTarget, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/models.go new file mode 100644 index 000000000000..1117fbad068c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/models.go @@ -0,0 +1,2348 @@ +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool" + +// ACL access Control List (ACL) for an iSCSI Target; defines LUN masking policy +type ACL struct { + // InitiatorIqn - iSCSI initiator IQN (iSCSI Qualified Name); example: "iqn.2005-03.org.iscsi:client". + InitiatorIqn *string `json:"initiatorIqn,omitempty"` + // MappedLuns - List of LUN names mapped to the ACL. + MappedLuns *[]string `json:"mappedLuns,omitempty"` +} + +// Disk azure Managed Disk to attach to the Disk Pool. +type Disk struct { + // ID - Unique Azure Resource ID of the Managed Disk. + ID *string `json:"id,omitempty"` +} + +// DiskPool response for Disk Pool request. +type DiskPool struct { + autorest.Response `json:"-"` + // Sku - Determines the SKU of the Disk pool + *Sku `json:"sku,omitempty"` + // DiskPoolProperties - Properties of Disk Pool. + *DiskPoolProperties `json:"properties,omitempty"` + // ManagedBy - READ-ONLY; Azure resource id. Indicates if this resource is managed by another Azure resource. + ManagedBy *string `json:"managedBy,omitempty"` + // ManagedByExtended - READ-ONLY; List of Azure resource ids that manage this resource. + ManagedByExtended *[]string `json:"managedByExtended,omitempty"` + // SystemData - READ-ONLY; Resource metadata required by ARM RPC + SystemData *SystemMetadata `json:"systemData,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives. + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskPool. 
+func (dp DiskPool) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dp.Sku != nil { + objectMap["sku"] = dp.Sku + } + if dp.DiskPoolProperties != nil { + objectMap["properties"] = dp.DiskPoolProperties + } + if dp.Tags != nil { + objectMap["tags"] = dp.Tags + } + if dp.Location != nil { + objectMap["location"] = dp.Location + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskPool struct. +func (dp *DiskPool) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + dp.Sku = &sku + } + case "properties": + if v != nil { + var diskPoolProperties DiskPoolProperties + err = json.Unmarshal(*v, &diskPoolProperties) + if err != nil { + return err + } + dp.DiskPoolProperties = &diskPoolProperties + } + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + dp.ManagedBy = &managedBy + } + case "managedByExtended": + if v != nil { + var managedByExtended []string + err = json.Unmarshal(*v, &managedByExtended) + if err != nil { + return err + } + dp.ManagedByExtended = &managedByExtended + } + case "systemData": + if v != nil { + var systemData SystemMetadata + err = json.Unmarshal(*v, &systemData) + if err != nil { + return err + } + dp.SystemData = &systemData + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + dp.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + dp.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + dp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + dp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + dp.Type = &typeVar + } + } + } + + return nil +} + +// DiskPoolCreate request payload for create or update Disk Pool request. +type DiskPoolCreate struct { + // Sku - Determines the SKU of the Disk Pool + Sku *Sku `json:"sku,omitempty"` + // DiskPoolCreateProperties - Properties for Disk Pool create request. + *DiskPoolCreateProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives. + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` + // ManagedBy - Azure resource id. Indicates if this resource is managed by another Azure resource. + ManagedBy *string `json:"managedBy,omitempty"` + // ManagedByExtended - List of Azure resource ids that manage this resource. 
+ ManagedByExtended *[]string `json:"managedByExtended,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskPoolCreate. +func (dpc DiskPoolCreate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dpc.Sku != nil { + objectMap["sku"] = dpc.Sku + } + if dpc.DiskPoolCreateProperties != nil { + objectMap["properties"] = dpc.DiskPoolCreateProperties + } + if dpc.Tags != nil { + objectMap["tags"] = dpc.Tags + } + if dpc.Location != nil { + objectMap["location"] = dpc.Location + } + if dpc.ManagedBy != nil { + objectMap["managedBy"] = dpc.ManagedBy + } + if dpc.ManagedByExtended != nil { + objectMap["managedByExtended"] = dpc.ManagedByExtended + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskPoolCreate struct. +func (dpc *DiskPoolCreate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + dpc.Sku = &sku + } + case "properties": + if v != nil { + var diskPoolCreateProperties DiskPoolCreateProperties + err = json.Unmarshal(*v, &diskPoolCreateProperties) + if err != nil { + return err + } + dpc.DiskPoolCreateProperties = &diskPoolCreateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + dpc.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + dpc.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + dpc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + dpc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + dpc.Type = &typeVar + } + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + dpc.ManagedBy = &managedBy + } + case "managedByExtended": + if v != nil { + var managedByExtended []string + err = json.Unmarshal(*v, &managedByExtended) + if err != nil { + return err + } + dpc.ManagedByExtended = &managedByExtended + } + } + } + + return nil +} + +// DiskPoolCreateProperties properties for Disk Pool create or update request. +type DiskPoolCreateProperties struct { + // AvailabilityZones - Logical zone for Disk Pool resource; example: ["1"]. + AvailabilityZones *[]string `json:"availabilityZones,omitempty"` + // Disks - List of Azure Managed Disks to attach to a Disk Pool. + Disks *[]Disk `json:"disks,omitempty"` + // SubnetID - Azure Resource ID of a Subnet for the Disk Pool. + SubnetID *string `json:"subnetId,omitempty"` + // AdditionalCapabilities - List of additional capabilities for a Disk Pool. + AdditionalCapabilities *[]string `json:"additionalCapabilities,omitempty"` +} + +// DiskPoolListResult list of Disk Pools +type DiskPoolListResult struct { + autorest.Response `json:"-"` + // Value - An array of Disk pool objects. + Value *[]DiskPool `json:"value,omitempty"` + // NextLink - READ-ONLY; URI to fetch the next section of the paginated response. 
+ NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskPoolListResult. +func (dplr DiskPoolListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dplr.Value != nil { + objectMap["value"] = dplr.Value + } + return json.Marshal(objectMap) +} + +// DiskPoolListResultIterator provides access to a complete listing of DiskPool values. +type DiskPoolListResultIterator struct { + i int + page DiskPoolListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskPoolListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskPoolListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskPoolListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskPoolListResultIterator) Response() DiskPoolListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskPoolListResultIterator) Value() DiskPool { + if !iter.page.NotDone() { + return DiskPool{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskPoolListResultIterator type. +func NewDiskPoolListResultIterator(page DiskPoolListResultPage) DiskPoolListResultIterator { + return DiskPoolListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dplr DiskPoolListResult) IsEmpty() bool { + return dplr.Value == nil || len(*dplr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dplr DiskPoolListResult) hasNextLink() bool { + return dplr.NextLink != nil && len(*dplr.NextLink) != 0 +} + +// diskPoolListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dplr DiskPoolListResult) diskPoolListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dplr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dplr.NextLink))) +} + +// DiskPoolListResultPage contains a page of DiskPool values. +type DiskPoolListResultPage struct { + fn func(context.Context, DiskPoolListResult) (DiskPoolListResult, error) + dplr DiskPoolListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DiskPoolListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dplr) + if err != nil { + return err + } + page.dplr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskPoolListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskPoolListResultPage) NotDone() bool { + return !page.dplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskPoolListResultPage) Response() DiskPoolListResult { + return page.dplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskPoolListResultPage) Values() []DiskPool { + if page.dplr.IsEmpty() { + return nil + } + return *page.dplr.Value +} + +// Creates a new instance of the DiskPoolListResultPage type. +func NewDiskPoolListResultPage(cur DiskPoolListResult, getNextPage func(context.Context, DiskPoolListResult) (DiskPoolListResult, error)) DiskPoolListResultPage { + return DiskPoolListResultPage{ + fn: getNextPage, + dplr: cur, + } +} + +// DiskPoolProperties disk Pool response properties. +type DiskPoolProperties struct { + // ProvisioningState - State of the operation on the resource. Possible values include: 'ProvisioningStatesInvalid', 'ProvisioningStatesSucceeded', 'ProvisioningStatesFailed', 'ProvisioningStatesCanceled', 'ProvisioningStatesPending', 'ProvisioningStatesCreating', 'ProvisioningStatesUpdating', 'ProvisioningStatesDeleting' + ProvisioningState ProvisioningStates `json:"provisioningState,omitempty"` + // AvailabilityZones - Logical zone for Disk Pool resource; example: ["1"]. + AvailabilityZones *[]string `json:"availabilityZones,omitempty"` + // Status - Operational status of the Disk Pool. Possible values include: 'OperationalStatusInvalid', 'OperationalStatusUnknown', 'OperationalStatusHealthy', 'OperationalStatusUnhealthy', 'OperationalStatusUpdating', 'OperationalStatusRunning', 'OperationalStatusStopped', 'OperationalStatusStoppeddeallocated' + Status OperationalStatus `json:"status,omitempty"` + // Disks - List of Azure Managed Disks to attach to a Disk Pool. + Disks *[]Disk `json:"disks,omitempty"` + // SubnetID - Azure Resource ID of a Subnet for the Disk Pool. + SubnetID *string `json:"subnetId,omitempty"` + // AdditionalCapabilities - List of additional capabilities for Disk Pool. + AdditionalCapabilities *[]string `json:"additionalCapabilities,omitempty"` +} + +// DiskPoolsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskPoolsCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskPoolsClient) (DiskPool, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
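// ---------------------------------------------------------------------------
// Illustrative pagination sketch (not part of the generated SDK above): the
// iterator/page pair generated for DiskPoolListResult follows the usual
// AutoRest pattern, so consumers walk every Disk Pool with NotDone/Value/
// NextWithContext instead of following nextLink by hand. The *Complete list
// method used here is assumed to exist on DiskPoolsClient (it would live in
// diskpools.go, not models.go); the "fmt" import is also an assumption.
func printDiskPoolNames(ctx context.Context, client DiskPoolsClient, resourceGroup string) error {
	// Assumed generated helper returning a DiskPoolListResultIterator.
	iter, err := client.ListByResourceGroupComplete(ctx, resourceGroup)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		pool := iter.Value()
		if pool.Name != nil {
			fmt.Println(*pool.Name)
		}
		// NextWithContext fetches the next page transparently once the
		// current page is exhausted.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
// ---------------------------------------------------------------------------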
+func (future *DiskPoolsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskPoolsCreateOrUpdateFuture.Result. +func (future *DiskPoolsCreateOrUpdateFuture) result(client DiskPoolsClient) (dp DiskPool, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + dp.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.DiskPoolsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dp.Response.Response, err = future.GetResult(sender); err == nil && dp.Response.Response.StatusCode != http.StatusNoContent { + dp, err = client.CreateOrUpdateResponder(dp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsCreateOrUpdateFuture", "Result", dp.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskPoolsDeallocateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskPoolsDeallocateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskPoolsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskPoolsDeallocateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskPoolsDeallocateFuture.Result. +func (future *DiskPoolsDeallocateFuture) result(client DiskPoolsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.DiskPoolsDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskPoolsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskPoolsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskPoolsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskPoolsDeleteFuture.Result. 
+func (future *DiskPoolsDeleteFuture) result(client DiskPoolsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.DiskPoolsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskPoolsStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskPoolsStartFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskPoolsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskPoolsStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskPoolsStartFuture.Result. +func (future *DiskPoolsStartFuture) result(client DiskPoolsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.DiskPoolsStartFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskPoolsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskPoolsUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskPoolsClient) (DiskPool, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskPoolsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskPoolsUpdateFuture.Result. 
+func (future *DiskPoolsUpdateFuture) result(client DiskPoolsClient) (dp DiskPool, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + dp.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.DiskPoolsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dp.Response.Response, err = future.GetResult(sender); err == nil && dp.Response.Response.StatusCode != http.StatusNoContent { + dp, err = client.UpdateResponder(dp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsUpdateFuture", "Result", dp.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskPoolsUpgradeFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskPoolsUpgradeFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskPoolsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskPoolsUpgradeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskPoolsUpgradeFuture.Result. +func (future *DiskPoolsUpgradeFuture) result(client DiskPoolsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.DiskPoolsUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.DiskPoolsUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskPoolUpdate request payload for Update Disk Pool request. +type DiskPoolUpdate struct { + // ManagedBy - Azure resource id. Indicates if this resource is managed by another Azure resource. + ManagedBy *string `json:"managedBy,omitempty"` + // ManagedByExtended - List of Azure resource ids that manage this resource. + ManagedByExtended *[]string `json:"managedByExtended,omitempty"` + // DiskPoolUpdateProperties - Properties for Disk Pool update request. + *DiskPoolUpdateProperties `json:"properties,omitempty"` + // Sku - Determines the SKU of the Disk Pool + Sku *Sku `json:"sku,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DiskPoolUpdate. 
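// ---------------------------------------------------------------------------
// Illustrative payload sketch (not part of the generated SDK above): how a
// DiskPoolUpdate is assembled to attach a managed disk to an existing Disk
// Pool, and how the resulting DiskPoolsUpdateFuture is waited on. The
// DiskPoolsClient.Update signature is assumed (it would live in diskpools.go,
// not models.go); the disk resource ID is a caller-supplied placeholder, and
// "context" plus the autorest "to" helper are already among this file's
// imports.
func attachDiskToPool(ctx context.Context, client DiskPoolsClient, resourceGroup, poolName, diskID string) (DiskPool, error) {
	payload := DiskPoolUpdate{
		DiskPoolUpdateProperties: &DiskPoolUpdateProperties{
			// Disks carries the desired list of attached managed disks.
			Disks: &[]Disk{
				{ID: to.StringPtr(diskID)},
			},
		},
	}
	// Assumed generated method: Update(ctx, resourceGroup, poolName, payload).
	future, err := client.Update(ctx, resourceGroup, poolName, payload)
	if err != nil {
		return DiskPool{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return DiskPool{}, err
	}
	return future.Result(client)
}
// ---------------------------------------------------------------------------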
+func (dpu DiskPoolUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dpu.ManagedBy != nil { + objectMap["managedBy"] = dpu.ManagedBy + } + if dpu.ManagedByExtended != nil { + objectMap["managedByExtended"] = dpu.ManagedByExtended + } + if dpu.DiskPoolUpdateProperties != nil { + objectMap["properties"] = dpu.DiskPoolUpdateProperties + } + if dpu.Sku != nil { + objectMap["sku"] = dpu.Sku + } + if dpu.Tags != nil { + objectMap["tags"] = dpu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskPoolUpdate struct. +func (dpu *DiskPoolUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + dpu.ManagedBy = &managedBy + } + case "managedByExtended": + if v != nil { + var managedByExtended []string + err = json.Unmarshal(*v, &managedByExtended) + if err != nil { + return err + } + dpu.ManagedByExtended = &managedByExtended + } + case "properties": + if v != nil { + var diskPoolUpdateProperties DiskPoolUpdateProperties + err = json.Unmarshal(*v, &diskPoolUpdateProperties) + if err != nil { + return err + } + dpu.DiskPoolUpdateProperties = &diskPoolUpdateProperties + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + dpu.Sku = &sku + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + dpu.Tags = tags + } + } + } + + return nil +} + +// DiskPoolUpdateProperties properties for Disk Pool update request. +type DiskPoolUpdateProperties struct { + // Disks - List of Azure Managed Disks to attach to a Disk Pool. + Disks *[]Disk `json:"disks,omitempty"` +} + +// DiskPoolZoneInfo disk Pool SKU Details +type DiskPoolZoneInfo struct { + // AvailabilityZones - READ-ONLY; Logical zone for Disk Pool resource; example: ["1"]. + AvailabilityZones *[]string `json:"availabilityZones,omitempty"` + // AdditionalCapabilities - READ-ONLY; List of additional capabilities for Disk Pool. + AdditionalCapabilities *[]string `json:"additionalCapabilities,omitempty"` + // Sku - READ-ONLY; Determines the SKU of VM deployed for Disk Pool + Sku *Sku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskPoolZoneInfo. +func (dpzi DiskPoolZoneInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DiskPoolZoneListResult list Disk Pool skus operation response. +type DiskPoolZoneListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The list of Disk Pool Skus. + Value *[]DiskPoolZoneInfo `json:"value,omitempty"` + // NextLink - READ-ONLY; URI to fetch the next section of the paginated response. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskPoolZoneListResult. +func (dpzlr DiskPoolZoneListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DiskPoolZoneListResultIterator provides access to a complete listing of DiskPoolZoneInfo values. +type DiskPoolZoneListResultIterator struct { + i int + page DiskPoolZoneListResultPage +} + +// NextWithContext advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskPoolZoneListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolZoneListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskPoolZoneListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskPoolZoneListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskPoolZoneListResultIterator) Response() DiskPoolZoneListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskPoolZoneListResultIterator) Value() DiskPoolZoneInfo { + if !iter.page.NotDone() { + return DiskPoolZoneInfo{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskPoolZoneListResultIterator type. +func NewDiskPoolZoneListResultIterator(page DiskPoolZoneListResultPage) DiskPoolZoneListResultIterator { + return DiskPoolZoneListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dpzlr DiskPoolZoneListResult) IsEmpty() bool { + return dpzlr.Value == nil || len(*dpzlr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dpzlr DiskPoolZoneListResult) hasNextLink() bool { + return dpzlr.NextLink != nil && len(*dpzlr.NextLink) != 0 +} + +// diskPoolZoneListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dpzlr DiskPoolZoneListResult) diskPoolZoneListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dpzlr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dpzlr.NextLink))) +} + +// DiskPoolZoneListResultPage contains a page of DiskPoolZoneInfo values. +type DiskPoolZoneListResultPage struct { + fn func(context.Context, DiskPoolZoneListResult) (DiskPoolZoneListResult, error) + dpzlr DiskPoolZoneListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DiskPoolZoneListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskPoolZoneListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dpzlr) + if err != nil { + return err + } + page.dpzlr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskPoolZoneListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskPoolZoneListResultPage) NotDone() bool { + return !page.dpzlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskPoolZoneListResultPage) Response() DiskPoolZoneListResult { + return page.dpzlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskPoolZoneListResultPage) Values() []DiskPoolZoneInfo { + if page.dpzlr.IsEmpty() { + return nil + } + return *page.dpzlr.Value +} + +// Creates a new instance of the DiskPoolZoneListResultPage type. +func NewDiskPoolZoneListResultPage(cur DiskPoolZoneListResult, getNextPage func(context.Context, DiskPoolZoneListResult) (DiskPoolZoneListResult, error)) DiskPoolZoneListResultPage { + return DiskPoolZoneListResultPage{ + fn: getNextPage, + dpzlr: cur, + } +} + +// EndpointDependency a domain name that a service is reached at, including details of the current +// connection status. +type EndpointDependency struct { + // DomainName - The domain name of the dependency. + DomainName *string `json:"domainName,omitempty"` + // EndpointDetails - The IP Addresses and Ports used when connecting to DomainName. + EndpointDetails *[]EndpointDetail `json:"endpointDetails,omitempty"` +} + +// EndpointDetail current TCP connectivity information from the App Service Environment to a single +// endpoint. +type EndpointDetail struct { + // IPAddress - An IP Address that Domain Name currently resolves to. + IPAddress *string `json:"ipAddress,omitempty"` + // Port - The port an endpoint is connected to. + Port *int32 `json:"port,omitempty"` + // Latency - The time in milliseconds it takes for a TCP connection to be created from the App Service Environment to this IpAddress at this Port. + Latency *float64 `json:"latency,omitempty"` + // IsAccessible - Whether it is possible to create a TCP connection from the App Service Environment to this IpAddress at this Port. + IsAccessible *bool `json:"isAccessible,omitempty"` +} + +// Error the resource management error response. +type Error struct { + // Error - RP error response. + Error *ErrorResponse `json:"error,omitempty"` +} + +// ErrorAdditionalInfo the resource management error additional info. +type ErrorAdditionalInfo struct { + // Type - READ-ONLY; The additional info type. + Type *string `json:"type,omitempty"` + // Info - READ-ONLY; The additional info. + Info interface{} `json:"info,omitempty"` +} + +// MarshalJSON is the custom marshaler for ErrorAdditionalInfo. 
+func (eai ErrorAdditionalInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ErrorResponse the resource management error response. +type ErrorResponse struct { + // Code - READ-ONLY; The error code. + Code *string `json:"code,omitempty"` + // Message - READ-ONLY; The error message. + Message *string `json:"message,omitempty"` + // Target - READ-ONLY; The error target. + Target *string `json:"target,omitempty"` + // Details - READ-ONLY; The error details. + Details *[]ErrorResponse `json:"details,omitempty"` + // AdditionalInfo - READ-ONLY; The error additional info. + AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"` +} + +// MarshalJSON is the custom marshaler for ErrorResponse. +func (er ErrorResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// IscsiLun LUN to expose the Azure Managed Disk. +type IscsiLun struct { + // Name - User defined name for iSCSI LUN; example: "lun0" + Name *string `json:"name,omitempty"` + // ManagedDiskAzureResourceID - Azure Resource ID of the Managed Disk. + ManagedDiskAzureResourceID *string `json:"managedDiskAzureResourceId,omitempty"` + // Lun - READ-ONLY; Specifies the Logical Unit Number of the iSCSI LUN. + Lun *int32 `json:"lun,omitempty"` +} + +// MarshalJSON is the custom marshaler for IscsiLun. +func (il IscsiLun) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if il.Name != nil { + objectMap["name"] = il.Name + } + if il.ManagedDiskAzureResourceID != nil { + objectMap["managedDiskAzureResourceId"] = il.ManagedDiskAzureResourceID + } + return json.Marshal(objectMap) +} + +// IscsiTarget response for iSCSI Target requests. +type IscsiTarget struct { + autorest.Response `json:"-"` + // IscsiTargetProperties - Properties for iSCSI Target operations. + *IscsiTargetProperties `json:"properties,omitempty"` + // SystemData - Resource metadata required by ARM RPC + SystemData *SystemMetadata `json:"systemData,omitempty"` + // ManagedBy - READ-ONLY; Azure resource id. Indicates if this resource is managed by another Azure resource. + ManagedBy *string `json:"managedBy,omitempty"` + // ManagedByExtended - READ-ONLY; List of Azure resource ids that manage this resource. + ManagedByExtended *[]string `json:"managedByExtended,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for IscsiTarget. +func (it IscsiTarget) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if it.IscsiTargetProperties != nil { + objectMap["properties"] = it.IscsiTargetProperties + } + if it.SystemData != nil { + objectMap["systemData"] = it.SystemData + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for IscsiTarget struct. 
+func (it *IscsiTarget) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var iscsiTargetProperties IscsiTargetProperties + err = json.Unmarshal(*v, &iscsiTargetProperties) + if err != nil { + return err + } + it.IscsiTargetProperties = &iscsiTargetProperties + } + case "systemData": + if v != nil { + var systemData SystemMetadata + err = json.Unmarshal(*v, &systemData) + if err != nil { + return err + } + it.SystemData = &systemData + } + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + it.ManagedBy = &managedBy + } + case "managedByExtended": + if v != nil { + var managedByExtended []string + err = json.Unmarshal(*v, &managedByExtended) + if err != nil { + return err + } + it.ManagedByExtended = &managedByExtended + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + it.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + it.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + it.Type = &typeVar + } + } + } + + return nil +} + +// IscsiTargetCreate payload for iSCSI Target create or update requests. +type IscsiTargetCreate struct { + // IscsiTargetCreateProperties - Properties for iSCSI Target create request. + *IscsiTargetCreateProperties `json:"properties,omitempty"` + // ManagedBy - Azure resource id. Indicates if this resource is managed by another Azure resource. + ManagedBy *string `json:"managedBy,omitempty"` + // ManagedByExtended - List of Azure resource ids that manage this resource. + ManagedByExtended *[]string `json:"managedByExtended,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for IscsiTargetCreate. +func (itc IscsiTargetCreate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if itc.IscsiTargetCreateProperties != nil { + objectMap["properties"] = itc.IscsiTargetCreateProperties + } + if itc.ManagedBy != nil { + objectMap["managedBy"] = itc.ManagedBy + } + if itc.ManagedByExtended != nil { + objectMap["managedByExtended"] = itc.ManagedByExtended + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for IscsiTargetCreate struct. 
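// ---------------------------------------------------------------------------
// Illustrative payload sketch (not part of the generated SDK above): a minimal
// IscsiTargetCreate exposing one managed-disk-backed LUN in dynamic ACL mode,
// passed to an assumed IscsiTargetsClient.CreateOrUpdate long-running
// operation (which would live in iscsitargets.go, not here). The
// IscsiTargetACLMode constant is expected to come from this package's enums
// file; the LUN name and disk ID are placeholders chosen for the sketch, and
// "context" and "to" are already among this file's imports.
func createIscsiTargetSketch(ctx context.Context, client IscsiTargetsClient, resourceGroup, poolName, targetName, diskID string) error {
	payload := IscsiTargetCreate{
		IscsiTargetCreateProperties: &IscsiTargetCreateProperties{
			ACLMode: IscsiTargetACLModeDynamic,
			Luns: &[]IscsiLun{
				{
					Name:                       to.StringPtr("lun0"),
					ManagedDiskAzureResourceID: to.StringPtr(diskID),
				},
			},
		},
	}
	// Assumed generated method mirroring the shape of Update above.
	future, err := client.CreateOrUpdate(ctx, resourceGroup, poolName, targetName, payload)
	if err != nil {
		return err
	}
	return future.WaitForCompletionRef(ctx, client.Client)
}
// ---------------------------------------------------------------------------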
+func (itc *IscsiTargetCreate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var iscsiTargetCreateProperties IscsiTargetCreateProperties + err = json.Unmarshal(*v, &iscsiTargetCreateProperties) + if err != nil { + return err + } + itc.IscsiTargetCreateProperties = &iscsiTargetCreateProperties + } + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + itc.ManagedBy = &managedBy + } + case "managedByExtended": + if v != nil { + var managedByExtended []string + err = json.Unmarshal(*v, &managedByExtended) + if err != nil { + return err + } + itc.ManagedByExtended = &managedByExtended + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + itc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + itc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + itc.Type = &typeVar + } + } + } + + return nil +} + +// IscsiTargetCreateProperties properties for iSCSI Target create or update request. +type IscsiTargetCreateProperties struct { + // ACLMode - Mode for Target connectivity. Possible values include: 'IscsiTargetACLModeDynamic', 'IscsiTargetACLModeStatic' + ACLMode IscsiTargetACLMode `json:"aclMode,omitempty"` + // TargetIqn - iSCSI Target IQN (iSCSI Qualified Name); example: "iqn.2005-03.org.iscsi:server". + TargetIqn *string `json:"targetIqn,omitempty"` + // StaticAcls - Access Control List (ACL) for an iSCSI Target; defines LUN masking policy + StaticAcls *[]ACL `json:"staticAcls,omitempty"` + // Luns - List of LUNs to be exposed through iSCSI Target. + Luns *[]IscsiLun `json:"luns,omitempty"` +} + +// IscsiTargetList list of iSCSI Targets. +type IscsiTargetList struct { + autorest.Response `json:"-"` + // Value - An array of iSCSI Targets in a Disk Pool. + Value *[]IscsiTarget `json:"value,omitempty"` + // NextLink - READ-ONLY; URI to fetch the next section of the paginated response. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for IscsiTargetList. +func (itl IscsiTargetList) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if itl.Value != nil { + objectMap["value"] = itl.Value + } + return json.Marshal(objectMap) +} + +// IscsiTargetListIterator provides access to a complete listing of IscsiTarget values. +type IscsiTargetListIterator struct { + i int + page IscsiTargetListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *IscsiTargetListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *IscsiTargetListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter IscsiTargetListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter IscsiTargetListIterator) Response() IscsiTargetList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter IscsiTargetListIterator) Value() IscsiTarget { + if !iter.page.NotDone() { + return IscsiTarget{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the IscsiTargetListIterator type. +func NewIscsiTargetListIterator(page IscsiTargetListPage) IscsiTargetListIterator { + return IscsiTargetListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (itl IscsiTargetList) IsEmpty() bool { + return itl.Value == nil || len(*itl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (itl IscsiTargetList) hasNextLink() bool { + return itl.NextLink != nil && len(*itl.NextLink) != 0 +} + +// iscsiTargetListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (itl IscsiTargetList) iscsiTargetListPreparer(ctx context.Context) (*http.Request, error) { + if !itl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(itl.NextLink))) +} + +// IscsiTargetListPage contains a page of IscsiTarget values. +type IscsiTargetListPage struct { + fn func(context.Context, IscsiTargetList) (IscsiTargetList, error) + itl IscsiTargetList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *IscsiTargetListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IscsiTargetListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.itl) + if err != nil { + return err + } + page.itl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *IscsiTargetListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page IscsiTargetListPage) NotDone() bool { + return !page.itl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page IscsiTargetListPage) Response() IscsiTargetList { + return page.itl +} + +// Values returns the slice of values for the current page or nil if there are no values. 
+func (page IscsiTargetListPage) Values() []IscsiTarget { + if page.itl.IsEmpty() { + return nil + } + return *page.itl.Value +} + +// Creates a new instance of the IscsiTargetListPage type. +func NewIscsiTargetListPage(cur IscsiTargetList, getNextPage func(context.Context, IscsiTargetList) (IscsiTargetList, error)) IscsiTargetListPage { + return IscsiTargetListPage{ + fn: getNextPage, + itl: cur, + } +} + +// IscsiTargetProperties response properties for iSCSI Target operations. +type IscsiTargetProperties struct { + // ACLMode - Mode for Target connectivity. Possible values include: 'IscsiTargetACLModeDynamic', 'IscsiTargetACLModeStatic' + ACLMode IscsiTargetACLMode `json:"aclMode,omitempty"` + // StaticAcls - Access Control List (ACL) for an iSCSI Target; defines LUN masking policy + StaticAcls *[]ACL `json:"staticAcls,omitempty"` + // Luns - List of LUNs to be exposed through iSCSI Target. + Luns *[]IscsiLun `json:"luns,omitempty"` + // TargetIqn - iSCSI Target IQN (iSCSI Qualified Name); example: "iqn.2005-03.org.iscsi:server". + TargetIqn *string `json:"targetIqn,omitempty"` + // ProvisioningState - State of the operation on the resource. Possible values include: 'ProvisioningStatesInvalid', 'ProvisioningStatesSucceeded', 'ProvisioningStatesFailed', 'ProvisioningStatesCanceled', 'ProvisioningStatesPending', 'ProvisioningStatesCreating', 'ProvisioningStatesUpdating', 'ProvisioningStatesDeleting' + ProvisioningState ProvisioningStates `json:"provisioningState,omitempty"` + // Status - Operational status of the iSCSI Target. Possible values include: 'OperationalStatusInvalid', 'OperationalStatusUnknown', 'OperationalStatusHealthy', 'OperationalStatusUnhealthy', 'OperationalStatusUpdating', 'OperationalStatusRunning', 'OperationalStatusStopped', 'OperationalStatusStoppeddeallocated' + Status OperationalStatus `json:"status,omitempty"` + // Endpoints - List of private IPv4 addresses to connect to the iSCSI Target. + Endpoints *[]string `json:"endpoints,omitempty"` + // Port - The port used by iSCSI Target portal group. + Port *int32 `json:"port,omitempty"` + // Sessions - READ-ONLY; List of identifiers for active sessions on the iSCSI target + Sessions *[]string `json:"sessions,omitempty"` +} + +// MarshalJSON is the custom marshaler for IscsiTargetProperties. +func (itp IscsiTargetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if itp.ACLMode != "" { + objectMap["aclMode"] = itp.ACLMode + } + if itp.StaticAcls != nil { + objectMap["staticAcls"] = itp.StaticAcls + } + if itp.Luns != nil { + objectMap["luns"] = itp.Luns + } + if itp.TargetIqn != nil { + objectMap["targetIqn"] = itp.TargetIqn + } + if itp.ProvisioningState != "" { + objectMap["provisioningState"] = itp.ProvisioningState + } + if itp.Status != "" { + objectMap["status"] = itp.Status + } + if itp.Endpoints != nil { + objectMap["endpoints"] = itp.Endpoints + } + if itp.Port != nil { + objectMap["port"] = itp.Port + } + return json.Marshal(objectMap) +} + +// IscsiTargetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type IscsiTargetsCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(IscsiTargetsClient) (IscsiTarget, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
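// ---------------------------------------------------------------------------
// Illustrative read sketch (not part of the generated SDK above): once a
// target is provisioned, consumers typically need the private endpoint
// addresses and portal port from IscsiTargetProperties. This helper flattens
// those nil-able fields into "host:port" strings, defaulting to 3260 (the
// conventional iSCSI port) when the service omits the port; "fmt" is an
// assumed import for this file.
func iscsiPortalAddresses(target IscsiTarget) []string {
	props := target.IscsiTargetProperties
	if props == nil || props.Endpoints == nil {
		return nil
	}
	port := int32(3260)
	if props.Port != nil {
		port = *props.Port
	}
	addresses := make([]string, 0, len(*props.Endpoints))
	for _, ip := range *props.Endpoints {
		addresses = append(addresses, fmt.Sprintf("%s:%d", ip, port))
	}
	return addresses
}
// ---------------------------------------------------------------------------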
+func (future *IscsiTargetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for IscsiTargetsCreateOrUpdateFuture.Result. +func (future *IscsiTargetsCreateOrUpdateFuture) result(client IscsiTargetsClient) (it IscsiTarget, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + it.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.IscsiTargetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if it.Response.Response, err = future.GetResult(sender); err == nil && it.Response.Response.StatusCode != http.StatusNoContent { + it, err = client.CreateOrUpdateResponder(it.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsCreateOrUpdateFuture", "Result", it.Response.Response, "Failure responding to request") + } + } + return +} + +// IscsiTargetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type IscsiTargetsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(IscsiTargetsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *IscsiTargetsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for IscsiTargetsDeleteFuture.Result. +func (future *IscsiTargetsDeleteFuture) result(client IscsiTargetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.IscsiTargetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// IscsiTargetsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type IscsiTargetsUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(IscsiTargetsClient) (IscsiTarget, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *IscsiTargetsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for IscsiTargetsUpdateFuture.Result. 
+func (future *IscsiTargetsUpdateFuture) result(client IscsiTargetsClient) (it IscsiTarget, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + it.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storagepool.IscsiTargetsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if it.Response.Response, err = future.GetResult(sender); err == nil && it.Response.Response.StatusCode != http.StatusNoContent { + it, err = client.UpdateResponder(it.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.IscsiTargetsUpdateFuture", "Result", it.Response.Response, "Failure responding to request") + } + } + return +} + +// IscsiTargetUpdate payload for iSCSI Target update requests. +type IscsiTargetUpdate struct { + // IscsiTargetUpdateProperties - Properties for iSCSI Target update request. + *IscsiTargetUpdateProperties `json:"properties,omitempty"` + // ManagedBy - Azure resource id. Indicates if this resource is managed by another Azure resource. + ManagedBy *string `json:"managedBy,omitempty"` + // ManagedByExtended - List of Azure resource ids that manage this resource. + ManagedByExtended *[]string `json:"managedByExtended,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for IscsiTargetUpdate. +func (itu IscsiTargetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if itu.IscsiTargetUpdateProperties != nil { + objectMap["properties"] = itu.IscsiTargetUpdateProperties + } + if itu.ManagedBy != nil { + objectMap["managedBy"] = itu.ManagedBy + } + if itu.ManagedByExtended != nil { + objectMap["managedByExtended"] = itu.ManagedByExtended + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for IscsiTargetUpdate struct. 
+func (itu *IscsiTargetUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var iscsiTargetUpdateProperties IscsiTargetUpdateProperties + err = json.Unmarshal(*v, &iscsiTargetUpdateProperties) + if err != nil { + return err + } + itu.IscsiTargetUpdateProperties = &iscsiTargetUpdateProperties + } + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + itu.ManagedBy = &managedBy + } + case "managedByExtended": + if v != nil { + var managedByExtended []string + err = json.Unmarshal(*v, &managedByExtended) + if err != nil { + return err + } + itu.ManagedByExtended = &managedByExtended + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + itu.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + itu.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + itu.Type = &typeVar + } + } + } + + return nil +} + +// IscsiTargetUpdateProperties properties for iSCSI Target update request. +type IscsiTargetUpdateProperties struct { + // StaticAcls - Access Control List (ACL) for an iSCSI Target; defines LUN masking policy + StaticAcls *[]ACL `json:"staticAcls,omitempty"` + // Luns - List of LUNs to be exposed through iSCSI Target. + Luns *[]IscsiLun `json:"luns,omitempty"` +} + +// OperationDisplay metadata about an operation. +type OperationDisplay struct { + // Provider - Localized friendly form of the resource provider name. + Provider *string `json:"provider,omitempty"` + // Resource - Localized friendly form of the resource type related to this action/operation. + Resource *string `json:"resource,omitempty"` + // Operation - Localized friendly name for the operation, as it should be shown to the user. + Operation *string `json:"operation,omitempty"` + // Description - Localized friendly description for the operation, as it should be shown to the user. + Description *string `json:"description,omitempty"` +} + +// OperationListResult list of operations supported by the RP. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - An array of operations supported by the StoragePool RP. + Value *[]RPOperation `json:"value,omitempty"` + // NextLink - URI to fetch the next section of the paginated response. + NextLink *string `json:"nextLink,omitempty"` +} + +// OutboundEnvironmentEndpoint endpoints accessed for a common purpose that the App Service Environment +// requires outbound network access to. +type OutboundEnvironmentEndpoint struct { + // Category - The type of service accessed by the App Service Environment, e.g., Azure Storage, Azure SQL Database, and Azure Active Directory. + Category *string `json:"category,omitempty"` + // Endpoints - The endpoints that the App Service Environment reaches the service at. + Endpoints *[]EndpointDependency `json:"endpoints,omitempty"` +} + +// OutboundEnvironmentEndpointList collection of Outbound Environment Endpoints +type OutboundEnvironmentEndpointList struct { + autorest.Response `json:"-"` + // Value - Collection of resources. + Value *[]OutboundEnvironmentEndpoint `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to next page of resources. 
+ NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for OutboundEnvironmentEndpointList. +func (oeel OutboundEnvironmentEndpointList) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if oeel.Value != nil { + objectMap["value"] = oeel.Value + } + return json.Marshal(objectMap) +} + +// OutboundEnvironmentEndpointListIterator provides access to a complete listing of +// OutboundEnvironmentEndpoint values. +type OutboundEnvironmentEndpointListIterator struct { + i int + page OutboundEnvironmentEndpointListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *OutboundEnvironmentEndpointListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OutboundEnvironmentEndpointListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *OutboundEnvironmentEndpointListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter OutboundEnvironmentEndpointListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter OutboundEnvironmentEndpointListIterator) Response() OutboundEnvironmentEndpointList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter OutboundEnvironmentEndpointListIterator) Value() OutboundEnvironmentEndpoint { + if !iter.page.NotDone() { + return OutboundEnvironmentEndpoint{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the OutboundEnvironmentEndpointListIterator type. +func NewOutboundEnvironmentEndpointListIterator(page OutboundEnvironmentEndpointListPage) OutboundEnvironmentEndpointListIterator { + return OutboundEnvironmentEndpointListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (oeel OutboundEnvironmentEndpointList) IsEmpty() bool { + return oeel.Value == nil || len(*oeel.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (oeel OutboundEnvironmentEndpointList) hasNextLink() bool { + return oeel.NextLink != nil && len(*oeel.NextLink) != 0 +} + +// outboundEnvironmentEndpointListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (oeel OutboundEnvironmentEndpointList) outboundEnvironmentEndpointListPreparer(ctx context.Context) (*http.Request, error) { + if !oeel.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(oeel.NextLink))) +} + +// OutboundEnvironmentEndpointListPage contains a page of OutboundEnvironmentEndpoint values. +type OutboundEnvironmentEndpointListPage struct { + fn func(context.Context, OutboundEnvironmentEndpointList) (OutboundEnvironmentEndpointList, error) + oeel OutboundEnvironmentEndpointList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *OutboundEnvironmentEndpointListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OutboundEnvironmentEndpointListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.oeel) + if err != nil { + return err + } + page.oeel = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *OutboundEnvironmentEndpointListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page OutboundEnvironmentEndpointListPage) NotDone() bool { + return !page.oeel.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page OutboundEnvironmentEndpointListPage) Response() OutboundEnvironmentEndpointList { + return page.oeel +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page OutboundEnvironmentEndpointListPage) Values() []OutboundEnvironmentEndpoint { + if page.oeel.IsEmpty() { + return nil + } + return *page.oeel.Value +} + +// Creates a new instance of the OutboundEnvironmentEndpointListPage type. +func NewOutboundEnvironmentEndpointListPage(cur OutboundEnvironmentEndpointList, getNextPage func(context.Context, OutboundEnvironmentEndpointList) (OutboundEnvironmentEndpointList, error)) OutboundEnvironmentEndpointListPage { + return OutboundEnvironmentEndpointListPage{ + fn: getNextPage, + oeel: cur, + } +} + +// ProxyResource the resource model definition for a ARM proxy resource. It will have everything other than +// required location and tags +type ProxyResource struct { + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ProxyResource. 
+func (pr ProxyResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// Resource ARM resource model definition. +type Resource struct { + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkuCapability capability a resource SKU has. +type ResourceSkuCapability struct { + // Name - READ-ONLY; Capability name + Name *string `json:"name,omitempty"` + // Value - READ-ONLY; Capability value + Value *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceSkuCapability. +func (rsc ResourceSkuCapability) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkuInfo resource SKU Details +type ResourceSkuInfo struct { + // APIVersion - READ-ONLY; StoragePool RP API version + APIVersion *string `json:"apiVersion,omitempty"` + // ResourceType - READ-ONLY; StoragePool resource type + ResourceType *string `json:"resourceType,omitempty"` + // Capabilities - READ-ONLY; List of additional capabilities for StoragePool resource. + Capabilities *[]ResourceSkuCapability `json:"capabilities,omitempty"` + // LocationInfo - READ-ONLY; Zones and zone capabilities in those locations where the SKU is available. + LocationInfo *ResourceSkuLocationInfo `json:"locationInfo,omitempty"` + // Name - READ-ONLY; Sku name + Name *string `json:"name,omitempty"` + // Tier - READ-ONLY; Sku tier + Tier *string `json:"tier,omitempty"` + // Restrictions - READ-ONLY; The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceSkuInfo. +func (rsi ResourceSkuInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkuListResult list Disk Pool skus operation response. +type ResourceSkuListResult struct { + autorest.Response `json:"-"` + // Value - The list of StoragePool resource skus. + Value *[]ResourceSkuInfo `json:"value,omitempty"` + // NextLink - URI to fetch the next section of the paginated response. + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceSkuListResultIterator provides access to a complete listing of ResourceSkuInfo values. +type ResourceSkuListResultIterator struct { + i int + page ResourceSkuListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *ResourceSkuListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkuListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ResourceSkuListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ResourceSkuListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ResourceSkuListResultIterator) Response() ResourceSkuListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ResourceSkuListResultIterator) Value() ResourceSkuInfo { + if !iter.page.NotDone() { + return ResourceSkuInfo{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ResourceSkuListResultIterator type. +func NewResourceSkuListResultIterator(page ResourceSkuListResultPage) ResourceSkuListResultIterator { + return ResourceSkuListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rslr ResourceSkuListResult) IsEmpty() bool { + return rslr.Value == nil || len(*rslr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (rslr ResourceSkuListResult) hasNextLink() bool { + return rslr.NextLink != nil && len(*rslr.NextLink) != 0 +} + +// resourceSkuListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rslr ResourceSkuListResult) resourceSkuListResultPreparer(ctx context.Context) (*http.Request, error) { + if !rslr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rslr.NextLink))) +} + +// ResourceSkuListResultPage contains a page of ResourceSkuInfo values. +type ResourceSkuListResultPage struct { + fn func(context.Context, ResourceSkuListResult) (ResourceSkuListResult, error) + rslr ResourceSkuListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *ResourceSkuListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkuListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.rslr) + if err != nil { + return err + } + page.rslr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ResourceSkuListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ResourceSkuListResultPage) NotDone() bool { + return !page.rslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ResourceSkuListResultPage) Response() ResourceSkuListResult { + return page.rslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ResourceSkuListResultPage) Values() []ResourceSkuInfo { + if page.rslr.IsEmpty() { + return nil + } + return *page.rslr.Value +} + +// Creates a new instance of the ResourceSkuListResultPage type. +func NewResourceSkuListResultPage(cur ResourceSkuListResult, getNextPage func(context.Context, ResourceSkuListResult) (ResourceSkuListResult, error)) ResourceSkuListResultPage { + return ResourceSkuListResultPage{ + fn: getNextPage, + rslr: cur, + } +} + +// ResourceSkuLocationInfo zone and capability info for resource sku +type ResourceSkuLocationInfo struct { + // Location - READ-ONLY; Location of the SKU + Location *string `json:"location,omitempty"` + // Zones - READ-ONLY; List of availability zones where the SKU is supported. + Zones *[]string `json:"zones,omitempty"` + // ZoneDetails - READ-ONLY; Details of capabilities available to a SKU in specific zones. + ZoneDetails *[]ResourceSkuZoneDetails `json:"zoneDetails,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceSkuLocationInfo. +func (rsli ResourceSkuLocationInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkuRestrictionInfo describes an available Compute SKU Restriction Information. +type ResourceSkuRestrictionInfo struct { + // Locations - READ-ONLY; Locations where the SKU is restricted + Locations *[]string `json:"locations,omitempty"` + // Zones - READ-ONLY; List of availability zones where the SKU is restricted. + Zones *[]string `json:"zones,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceSkuRestrictionInfo. +func (rsri ResourceSkuRestrictionInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkuRestrictions describes scaling information of a SKU. +type ResourceSkuRestrictions struct { + // Type - READ-ONLY; The type of restrictions. Possible values include: 'ResourceSkuRestrictionsTypeLocation', 'ResourceSkuRestrictionsTypeZone' + Type ResourceSkuRestrictionsType `json:"type,omitempty"` + // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. 
This would be different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // RestrictionInfo - READ-ONLY; The information about the restriction where the SKU cannot be used. + RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"` + // ReasonCode - READ-ONLY; The reason for restriction. Possible values include: 'ResourceSkuRestrictionsReasonCodeQuotaID', 'ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription' + ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceSkuRestrictions. +func (rsr ResourceSkuRestrictions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkuZoneDetails describes The zonal capabilities of a SKU. +type ResourceSkuZoneDetails struct { + // Name - READ-ONLY; The set of zones that the SKU is available in with the specified capabilities. + Name *[]string `json:"name,omitempty"` + // Capabilities - READ-ONLY; A list of capabilities that are available for the SKU in the specified list of zones. + Capabilities *[]ResourceSkuCapability `json:"capabilities,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceSkuZoneDetails. +func (rszd ResourceSkuZoneDetails) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// RPOperation description of a StoragePool RP Operation +type RPOperation struct { + // Name - The name of the operation being performed on this particular object + Name *string `json:"name,omitempty"` + // IsDataAction - Indicates whether the operation applies to data-plane. + IsDataAction *bool `json:"isDataAction,omitempty"` + // ActionType - Indicates the action type. + ActionType *string `json:"actionType,omitempty"` + // Display - Additional metadata about RP operation. + Display *OperationDisplay `json:"display,omitempty"` + // Origin - The intended executor of the operation; governs the display of the operation in the RBAC UX and the audit logs UX. + Origin *string `json:"origin,omitempty"` +} + +// Sku sku for ARM resource +type Sku struct { + // Name - Sku name + Name *string `json:"name,omitempty"` + // Tier - Sku tier + Tier *string `json:"tier,omitempty"` +} + +// SystemMetadata metadata pertaining to creation and last modification of the resource. +type SystemMetadata struct { + // CreatedBy - The identity that created the resource. + CreatedBy *string `json:"createdBy,omitempty"` + // CreatedByType - The type of identity that created the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' + CreatedByType CreatedByType `json:"createdByType,omitempty"` + // CreatedAt - The timestamp of resource creation (UTC). + CreatedAt *date.Time `json:"createdAt,omitempty"` + // LastModifiedBy - The identity that last modified the resource. + LastModifiedBy *string `json:"lastModifiedBy,omitempty"` + // LastModifiedByType - The type of identity that last modified the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' + LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"` + // LastModifiedAt - The type of identity that last modified the resource. 
+ LastModifiedAt *date.Time `json:"lastModifiedAt,omitempty"` +} + +// TrackedResource the resource model definition for a ARM tracked top level resource. +type TrackedResource struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives. + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TrackedResource. +func (tr TrackedResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tr.Tags != nil { + objectMap["tags"] = tr.Tags + } + if tr.Location != nil { + objectMap["location"] = tr.Location + } + return json.Marshal(objectMap) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/operations.go new file mode 100644 index 000000000000..2887e0571963 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/operations.go @@ -0,0 +1,98 @@ +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// OperationsClient is the client for the Operations methods of the Storagepool service. +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets a list of StoragePool operations. 
+func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.OperationsClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.StoragePool/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/resourceskus.go new file mode 100644 index 000000000000..8578563a472b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/resourceskus.go @@ -0,0 +1,154 @@ +package storagepool + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ResourceSkusClient is the client for the ResourceSkus methods of the Storagepool service. +type ResourceSkusClient struct { + BaseClient +} + +// NewResourceSkusClient creates an instance of the ResourceSkusClient client. 
+func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { + return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { + return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists available StoragePool resources and skus in an Azure location. +// Parameters: +// location - the location of the resource. +func (client ResourceSkusClient) List(ctx context.Context, location string) (result ResourceSkuListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") + defer func() { + sc := -1 + if result.rslr.Response.Response != nil { + sc = result.rslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storagepool.ResourceSkusClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.ResourceSkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storagepool.ResourceSkusClient", "List", resp, "Failure sending request") + return + } + + result.rslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.ResourceSkusClient", "List", resp, "Failure responding to request") + return + } + if result.rslr.hasNextLink() && result.rslr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client ResourceSkusClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.StoragePool/locations/{location}/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkuListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ResourceSkusClient) listNextResults(ctx context.Context, lastResults ResourceSkuListResult) (result ResourceSkuListResult, err error) { + req, err := lastResults.resourceSkuListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storagepool.ResourceSkusClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storagepool.ResourceSkusClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storagepool.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ResourceSkusClient) ListComplete(ctx context.Context, location string) (result ResourceSkuListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, location) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/version.go new file mode 100644 index 000000000000..b59659a1cd98 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool/version.go @@ -0,0 +1,19 @@ +package storagepool + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + Version() + " storagepool/2021-08-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return version.Number +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7f26b4f11417..cc039e93d598 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -116,6 +116,7 @@ github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2021-06-01/service github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage github.com/Azure/azure-sdk-for-go/services/storagecache/mgmt/2021-03-01/storagecache +github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool github.com/Azure/azure-sdk-for-go/services/storagesync/mgmt/2020-03-01/storagesync github.com/Azure/azure-sdk-for-go/services/subscription/mgmt/2020-09-01/subscription github.com/Azure/azure-sdk-for-go/services/synapse/mgmt/2021-03-01/synapse diff --git a/website/docs/r/storage_disks_pool.html.markdown b/website/docs/r/storage_disks_pool.html.markdown new file mode 100644 index 000000000000..857878f19383 --- /dev/null +++ b/website/docs/r/storage_disks_pool.html.markdown @@ -0,0 +1,96 @@ +--- +subcategory: "Storage" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_storage_disks_pool" +description: |- + Manages a Disks Pool. +--- + +# azurerm_storage_disks_pool + +Manages a Disks Pool. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "example" { + name = "example" + location = "West Europe" +} + +resource "azurerm_virtual_network" "example" { + name = "example-network" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + address_space = ["10.0.0.0/16"] +} + +resource "azurerm_subnet" "example" { + name = "example-subnet" + resource_group_name = azurerm_virtual_network.example.resource_group_name + virtual_network_name = azurerm_virtual_network.example.name + address_prefixes = ["10.0.0.0/24"] + delegation { + name = "diskspool" + service_delegation { + actions = ["Microsoft.Network/virtualNetworks/read"] + name = "Microsoft.StoragePool/diskPools" + } + } +} + +resource "azurerm_storage_disks_pool" "example" { + name = "example" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + subnet_id = azurerm_subnet.example.id + availability_zones = ["1"] + sku_name = "Basic_B1" + tags = { + foo = "bar" + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Disks Pool. The name must begin with a letter or number, end with a letter, number or underscore, and may contain only letters, numbers, underscores, periods, or hyphens, and length should be in the range [7 - 30]. Changing this forces a new Disks Pool to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the Disks Pool should exist. Changing this forces a new Disks Pool to be created. + +* `location` - (Required) The Azure Region where the Disks Pool should exist. Changing this forces a new Disks Pool to be created. + +* `availability_zones` - (Required) Specifies a list of logical zone (e.g. `["1"]`). Changing this forces a new Disks Pool to be created. + +* `sku_name` - (Required) The sku name of the Disk Pool. Possible values are "Basic_B1", "Standard_S1" and "Premium_P1". + +* `subnet_id` - (Required) The ID of the Subnet for the Disk Pool. Changing this forces a new Disks Pool to be created. + +--- + +* `tags` - (Optional) A mapping of tags which should be assigned to the Disks Pool. 
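+
+-> **Note:** As an illustrative sketch only (assuming the delegated Subnet from the Example Usage above), a Disks Pool using the documented `Premium_P1` SKU value and a different logical zone might look like the following; the resource name and zone value are placeholders, not defaults.
+
+```hcl
+resource "azurerm_storage_disks_pool" "premium_example" {
+  name                = "example-premium-pool"
+  resource_group_name = azurerm_resource_group.example.name
+  location            = azurerm_resource_group.example.location
+  subnet_id           = azurerm_subnet.example.id
+  availability_zones  = ["2"]
+  sku_name            = "Premium_P1"
+}
+```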
+
+## Attributes Reference
+
+In addition to the Arguments listed above - the following Attributes are exported:
+
+* `id` - The Resource ID of the Disks Pool.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
+
+* `create` - (Defaults to 30 minutes) Used when creating the Disks Pool.
+* `read` - (Defaults to 5 minutes) Used when retrieving the Disks Pool.
+* `update` - (Defaults to 30 minutes) Used when updating the Disks Pool.
+* `delete` - (Defaults to 30 minutes) Used when deleting the Disks Pool.
+
+## Import
+
+Disks Pools can be imported using the `resource id`, e.g.
+
+```shell
+terraform import azurerm_storage_disks_pool.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/disksPool1
+```
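+
+The exported `id` can be referenced elsewhere in a configuration in the usual way; as a minimal illustration (the output name below is arbitrary):
+
+```hcl
+# Expose the Disks Pool ID from the example resource above.
+output "disks_pool_id" {
+  value = azurerm_storage_disks_pool.example.id
+}
+```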